Merge branch 'kvm-ppc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
This fix was intended for 4.13, but didn't get in because both maintainers were on vacation. Paul Mackerras: "It adds mutual exclusion between list_add_rcu and list_del_rcu calls on the kvm->arch.spapr_tce_tables list. Without this, userspace could potentially trigger corruption of the list and cause a host crash or worse."
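As a minimal sketch of the locking rule the fix enforces (hypothetical, simplified helpers; the real code is kvm_vm_ioctl_create_spapr_tce() and kvm_spapr_tce_release() in arch/powerpc/kvm/book3s_64_vio.c, shown in the diff below): RCU list writers must be serialized against each other with kvm->lock, even though RCU readers stay lockless.

/*
 * Sketch only. list_add_rcu()/list_del_rcu() tolerate concurrent readers,
 * but two writers racing on the same list can corrupt it, so both the
 * create and the release paths take kvm->lock around the list update.
 */
static void spapr_tce_table_register(struct kvm *kvm,
				     struct kvmppc_spapr_tce_table *stt)
{
	mutex_lock(&kvm->lock);			/* serialize writers */
	list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	mutex_unlock(&kvm->lock);
}

static void spapr_tce_table_unregister(struct kvm *kvm,
				       struct kvmppc_spapr_tce_table *stt)
{
	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);		/* readers remain lockless (RCU) */
	mutex_unlock(&kvm->lock);
}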
This commit is contained in:
Commit 5f54c8b2d4
@ -27,5 +27,11 @@ You have to add the following kernel parameters in your elilo.conf:
Macbook Pro 17", iMac 20" :
	video=efifb:i20

Accepted options:

nowc	Don't map the framebuffer write combined. This can be used
	to workaround side-effects and slowdowns on other CPU cores
	when large amounts of console data are written.

--
Edgar Hucek <gimli@dark-green.com>
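For instance (a hypothetical elilo.conf fragment, adjust to your own bootloader and root device), the new option is passed like any other efifb parameter:

	append="root=/dev/sda2 video=efifb:nowc"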
@ -459,7 +459,7 @@ pin controller?

This is done by registering "ranges" of pins, which are essentially
cross-reference tables. These are described in
Documentation/pinctrl.txt
Documentation/driver-api/pinctl.rst

While the pin allocation is totally managed by the pinctrl subsystem,
gpio (under gpiolib) is still maintained by gpio drivers. It may happen
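As an illustration of the cross-reference tables mentioned above, a pin controller driver typically registers a GPIO range roughly like this (a hedged sketch; the names and numbers are made up, see the referenced documentation for the authoritative API):

/* Hypothetical example: map 16 GPIOs starting at GPIO 32 onto pins 0..15 */
static struct pinctrl_gpio_range my_gpio_range = {
	.name = "my-bank-a",
	.id = 0,
	.base = 32,	/* first GPIO number covered by this range */
	.pin_base = 0,	/* matching first pin number on the pin controller */
	.npins = 16,
};

/* called from the pin controller driver's probe() */
pinctrl_add_gpio_range(pctldev, &my_gpio_range);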
@ -58,20 +58,23 @@ Symbols/Function Pointers
	%ps	versatile_init
	%pB	prev_fn_of_versatile_init+0x88/0x88

For printing symbols and function pointers. The ``S`` and ``s`` specifiers
result in the symbol name with (``S``) or without (``s``) offsets. Where
this is used on a kernel without KALLSYMS - the symbol address is
printed instead.
The ``F`` and ``f`` specifiers are for printing function pointers,
for example, f->func, &gettimeofday. They have the same result as
``S`` and ``s`` specifiers. But they do an extra conversion on
ia64, ppc64 and parisc64 architectures where the function pointers
are actually function descriptors.

The ``S`` and ``s`` specifiers can be used for printing symbols
from direct addresses, for example, __builtin_return_address(0),
(void *)regs->ip. They result in the symbol name with (``S``) or
without (``s``) offsets. If KALLSYMS are disabled then the symbol
address is printed instead.

The ``B`` specifier results in the symbol name with offsets and should be
used when printing stack backtraces. The specifier takes into
consideration the effect of compiler optimisations which may occur
when tail-call``s are used and marked with the noreturn GCC attribute.

On ia64, ppc64 and parisc64 architectures function pointers are
actually function descriptors which must first be resolved. The ``F`` and
``f`` specifiers perform this resolution and then provide the same
functionality as the ``S`` and ``s`` specifiers.

Kernel Pointers
===============
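A short usage example of the specifiers documented above (illustrative only; ops->probe and regs are hypothetical context, and the exact output depends on KALLSYMS and the architecture):

/* Hypothetical driver snippet exercising the symbol specifiers */
pr_info("entered via %pS\n", __builtin_return_address(0));	/* name + offset */
pr_info("callback is %pf\n", ops->probe);			/* function pointer, name only */
printk(KERN_ERR "faulting IP: %pB\n", (void *)regs->ip);	/* backtrace style */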
@ -1161,7 +1161,7 @@ M:	Brendan Higgins <brendanhiggins@google.com>
R:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
R:	Joel Stanley <joel@jms.id.au>
L:	linux-i2c@vger.kernel.org
L:	openbmc@lists.ozlabs.org
L:	openbmc@lists.ozlabs.org (moderated for non-subscribers)
S:	Maintained
F:	drivers/irqchip/irq-aspeed-i2c-ic.c
F:	drivers/i2c/busses/i2c-aspeed.c

@ -5834,7 +5834,7 @@ F:	drivers/staging/greybus/spi.c
F:	drivers/staging/greybus/spilib.c
F:	drivers/staging/greybus/spilib.h

GREYBUS LOOBACK/TIME PROTOCOLS DRIVERS
GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS
M:	Bryan O'Donoghue <pure.logic@nexus-software.ie>
S:	Maintained
F:	drivers/staging/greybus/loopback.c

@ -7110,7 +7110,6 @@ M:	Marc Zyngier <marc.zyngier@arm.com>
L:	linux-kernel@vger.kernel.org
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
T:	git git://git.infradead.org/users/jcooper/linux.git irqchip/core
F:	Documentation/devicetree/bindings/interrupt-controller/
F:	drivers/irqchip/

@ -10398,7 +10397,7 @@ L:	linux-gpio@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
S:	Maintained
F:	Documentation/devicetree/bindings/pinctrl/
F:	Documentation/pinctrl.txt
F:	Documentation/driver-api/pinctl.rst
F:	drivers/pinctrl/
F:	include/linux/pinctrl/

@ -14019,6 +14018,7 @@ F:	drivers/block/virtio_blk.c
F:	include/linux/virtio*.h
F:	include/uapi/linux/virtio_*.h
F:	drivers/crypto/virtio/
F:	mm/balloon_compaction.c

VIRTIO CRYPTO DRIVER
M:	Gonglei <arei.gonglei@huawei.com>
Makefile
@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 13
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc6
NAME = Fearless Coyote

# *DOCUMENTATION*
@ -297,6 +297,7 @@
|
|||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
status = "disabled";
|
||||
ranges;
|
||||
|
||||
adc: adc@50030800 {
|
||||
compatible = "fsl,imx25-gcq";
|
||||
|
|
|
@ -507,7 +507,7 @@
|
|||
pinctrl_pcie: pciegrp {
|
||||
fsl,pins = <
|
||||
/* PCIe reset */
|
||||
MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x030b0
|
||||
MX6QDL_PAD_EIM_DA0__GPIO3_IO00 0x030b0
|
||||
MX6QDL_PAD_EIM_DA4__GPIO3_IO04 0x030b0
|
||||
>;
|
||||
};
|
||||
|
@ -668,7 +668,7 @@
|
|||
&pcie {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_pcie>;
|
||||
reset-gpio = <&gpio6 31 GPIO_ACTIVE_LOW>;
|
||||
reset-gpio = <&gpio3 0 GPIO_ACTIVE_LOW>;
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
|
|
|
@ -557,6 +557,14 @@
|
|||
>;
|
||||
};
|
||||
|
||||
pinctrl_spi4: spi4grp {
|
||||
fsl,pins = <
|
||||
MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x59
|
||||
MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59
|
||||
MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59
|
||||
>;
|
||||
};
|
||||
|
||||
pinctrl_tsc2046_pendown: tsc2046_pendown {
|
||||
fsl,pins = <
|
||||
MX7D_PAD_EPDC_BDR1__GPIO2_IO29 0x59
|
||||
|
@ -697,13 +705,5 @@
|
|||
fsl,pins = <
|
||||
MX7D_PAD_LPSR_GPIO1_IO01__PWM1_OUT 0x110b0
|
||||
>;
|
||||
|
||||
pinctrl_spi4: spi4grp {
|
||||
fsl,pins = <
|
||||
MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x59
|
||||
MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59
|
||||
MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59
|
||||
>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -303,7 +303,7 @@
|
|||
#size-cells = <1>;
|
||||
atmel,smc = <&hsmc>;
|
||||
reg = <0x10000000 0x10000000
|
||||
0x40000000 0x30000000>;
|
||||
0x60000000 0x30000000>;
|
||||
ranges = <0x0 0x0 0x10000000 0x10000000
|
||||
0x1 0x0 0x60000000 0x10000000
|
||||
0x2 0x0 0x70000000 0x10000000
|
||||
|
@ -1048,18 +1048,18 @@
|
|||
};
|
||||
|
||||
hsmc: hsmc@f8014000 {
|
||||
compatible = "atmel,sama5d3-smc", "syscon", "simple-mfd";
|
||||
compatible = "atmel,sama5d2-smc", "syscon", "simple-mfd";
|
||||
reg = <0xf8014000 0x1000>;
|
||||
interrupts = <5 IRQ_TYPE_LEVEL_HIGH 6>;
|
||||
interrupts = <17 IRQ_TYPE_LEVEL_HIGH 6>;
|
||||
clocks = <&hsmc_clk>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
ranges;
|
||||
|
||||
pmecc: ecc-engine@ffffc070 {
|
||||
pmecc: ecc-engine@f8014070 {
|
||||
compatible = "atmel,sama5d2-pmecc";
|
||||
reg = <0xffffc070 0x490>,
|
||||
<0xffffc500 0x100>;
|
||||
reg = <0xf8014070 0x490>,
|
||||
<0xf8014500 0x100>;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -148,7 +148,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
|
|||
}
|
||||
|
||||
static inline void
|
||||
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
|
||||
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
tlb->mm = mm;
|
||||
tlb->fullmm = !(start | (end+1));
|
||||
|
@ -166,8 +167,14 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
|
|||
}
|
||||
|
||||
static inline void
|
||||
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
|
||||
arch_tlb_finish_mmu(struct mmu_gather *tlb,
|
||||
unsigned long start, unsigned long end, bool force)
|
||||
{
|
||||
if (force) {
|
||||
tlb->range_start = start;
|
||||
tlb->range_end = end;
|
||||
}
|
||||
|
||||
tlb_flush_mmu(tlb);
|
||||
|
||||
/* keep the page table cache within bounds */
|
||||
|
|
|
@ -51,6 +51,7 @@
|
|||
compatible = "sinovoip,bananapi-m64", "allwinner,sun50i-a64";
|
||||
|
||||
aliases {
|
||||
ethernet0 = &emac;
|
||||
serial0 = &uart0;
|
||||
serial1 = &uart1;
|
||||
};
|
||||
|
|
|
@ -51,6 +51,7 @@
|
|||
compatible = "pine64,pine64", "allwinner,sun50i-a64";
|
||||
|
||||
aliases {
|
||||
ethernet0 = &emac;
|
||||
serial0 = &uart0;
|
||||
serial1 = &uart1;
|
||||
serial2 = &uart2;
|
||||
|
|
|
@ -53,6 +53,7 @@
|
|||
"allwinner,sun50i-a64";
|
||||
|
||||
aliases {
|
||||
ethernet0 = &emac;
|
||||
serial0 = &uart0;
|
||||
};
|
||||
|
||||
|
|
|
@ -120,5 +120,8 @@
|
|||
};
|
||||
|
||||
&pio {
|
||||
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
|
||||
compatible = "allwinner,sun50i-h5-pinctrl";
|
||||
};
|
||||
|
|
|
@ -45,7 +45,7 @@
|
|||
stdout-path = "serial0:115200n8";
|
||||
};
|
||||
|
||||
audio_clkout: audio_clkout {
|
||||
audio_clkout: audio-clkout {
|
||||
/*
|
||||
* This is same as <&rcar_sound 0>
|
||||
* but needed to avoid cs2000/rcar_sound probe dead-lock
|
||||
|
|
|
@ -65,13 +65,13 @@ DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
|
|||
u64 _val; \
|
||||
if (needs_unstable_timer_counter_workaround()) { \
|
||||
const struct arch_timer_erratum_workaround *wa; \
|
||||
preempt_disable(); \
|
||||
preempt_disable_notrace(); \
|
||||
wa = __this_cpu_read(timer_unstable_counter_workaround); \
|
||||
if (wa && wa->read_##reg) \
|
||||
_val = wa->read_##reg(); \
|
||||
else \
|
||||
_val = read_sysreg(reg); \
|
||||
preempt_enable(); \
|
||||
preempt_enable_notrace(); \
|
||||
} else { \
|
||||
_val = read_sysreg(reg); \
|
||||
} \
|
||||
|
|
|
@ -114,10 +114,10 @@
|
|||
|
||||
/*
|
||||
* This is the base location for PIE (ET_DYN with INTERP) loads. On
|
||||
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
|
||||
* 64-bit, this is above 4GB to leave the entire 32-bit address
|
||||
* space open for things that want to use the area for 32-bit pointers.
|
||||
*/
|
||||
#define ELF_ET_DYN_BASE 0x100000000UL
|
||||
#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
|
|
@ -168,7 +168,8 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
|
|||
|
||||
|
||||
static inline void
|
||||
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
|
||||
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
tlb->mm = mm;
|
||||
tlb->max = ARRAY_SIZE(tlb->local);
|
||||
|
@ -185,8 +186,11 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
|
|||
* collected.
|
||||
*/
|
||||
static inline void
|
||||
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
|
||||
arch_tlb_finish_mmu(struct mmu_gather *tlb,
|
||||
unsigned long start, unsigned long end, bool force)
|
||||
{
|
||||
if (force)
|
||||
tlb->need_flush = 1;
|
||||
/*
|
||||
* Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
|
||||
* tlb->end_addr.
|
||||
|
|
|
@ -2260,7 +2260,7 @@ config CPU_R4K_CACHE_TLB
|
|||
|
||||
config MIPS_MT_SMP
|
||||
bool "MIPS MT SMP support (1 TC on each available VPE)"
|
||||
depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
|
||||
depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS
|
||||
select CPU_MIPSR2_IRQ_VI
|
||||
select CPU_MIPSR2_IRQ_EI
|
||||
select SYNC_R4K
|
||||
|
|
|
@ -243,8 +243,21 @@ include arch/mips/Kbuild.platforms
|
|||
ifdef CONFIG_PHYSICAL_START
|
||||
load-y = $(CONFIG_PHYSICAL_START)
|
||||
endif
|
||||
entry-y = 0x$(shell $(NM) vmlinux 2>/dev/null \
|
||||
|
||||
entry-noisa-y = 0x$(shell $(NM) vmlinux 2>/dev/null \
|
||||
| grep "\bkernel_entry\b" | cut -f1 -d \ )
|
||||
ifdef CONFIG_CPU_MICROMIPS
|
||||
#
|
||||
# Set the ISA bit, since the kernel_entry symbol in the ELF will have it
|
||||
# clear which would lead to images containing addresses which bootloaders may
|
||||
# jump to as MIPS32 code.
|
||||
#
|
||||
entry-y = $(patsubst %0,%1,$(patsubst %2,%3,$(patsubst %4,%5, \
|
||||
$(patsubst %6,%7,$(patsubst %8,%9,$(patsubst %a,%b, \
|
||||
$(patsubst %c,%d,$(patsubst %e,%f,$(entry-noisa-y)))))))))
|
||||
else
|
||||
entry-y = $(entry-noisa-y)
|
||||
endif
|
||||
|
||||
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
|
||||
drivers-$(CONFIG_PCI) += arch/mips/pci/
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
ashldi3.c
|
||||
bswapsi.c
|
|
@ -13,9 +13,9 @@
|
|||
#include <linux/mutex.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/octeon/octeon.h>
|
||||
#include <asm/octeon/cvmx-gpio-defs.h>
|
||||
|
||||
/* USB Control Register */
|
||||
union cvm_usbdrd_uctl_ctl {
|
||||
|
|
|
@ -147,23 +147,12 @@
|
|||
* Find irq with highest priority
|
||||
*/
|
||||
# open coded PTR_LA t1, cpu_mask_nr_tbl
|
||||
#if (_MIPS_SZPTR == 32)
|
||||
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
|
||||
# open coded la t1, cpu_mask_nr_tbl
|
||||
lui t1, %hi(cpu_mask_nr_tbl)
|
||||
addiu t1, %lo(cpu_mask_nr_tbl)
|
||||
|
||||
#endif
|
||||
#if (_MIPS_SZPTR == 64)
|
||||
# open coded dla t1, cpu_mask_nr_tbl
|
||||
.set push
|
||||
.set noat
|
||||
lui t1, %highest(cpu_mask_nr_tbl)
|
||||
lui AT, %hi(cpu_mask_nr_tbl)
|
||||
daddiu t1, t1, %higher(cpu_mask_nr_tbl)
|
||||
daddiu AT, AT, %lo(cpu_mask_nr_tbl)
|
||||
dsll t1, 32
|
||||
daddu t1, t1, AT
|
||||
.set pop
|
||||
#else
|
||||
#error GCC `-msym32' option required for 64-bit DECstation builds
|
||||
#endif
|
||||
1: lw t2,(t1)
|
||||
nop
|
||||
|
@ -214,23 +203,12 @@
|
|||
* Find irq with highest priority
|
||||
*/
|
||||
# open coded PTR_LA t1,asic_mask_nr_tbl
|
||||
#if (_MIPS_SZPTR == 32)
|
||||
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
|
||||
# open coded la t1, asic_mask_nr_tbl
|
||||
lui t1, %hi(asic_mask_nr_tbl)
|
||||
addiu t1, %lo(asic_mask_nr_tbl)
|
||||
|
||||
#endif
|
||||
#if (_MIPS_SZPTR == 64)
|
||||
# open coded dla t1, asic_mask_nr_tbl
|
||||
.set push
|
||||
.set noat
|
||||
lui t1, %highest(asic_mask_nr_tbl)
|
||||
lui AT, %hi(asic_mask_nr_tbl)
|
||||
daddiu t1, t1, %higher(asic_mask_nr_tbl)
|
||||
daddiu AT, AT, %lo(asic_mask_nr_tbl)
|
||||
dsll t1, 32
|
||||
daddu t1, t1, AT
|
||||
.set pop
|
||||
#else
|
||||
#error GCC `-msym32' option required for 64-bit DECstation builds
|
||||
#endif
|
||||
2: lw t2,(t1)
|
||||
nop
|
||||
|
|
|
@ -9,6 +9,8 @@
|
|||
#ifndef _ASM_CACHE_H
|
||||
#define _ASM_CACHE_H
|
||||
|
||||
#include <kmalloc.h>
|
||||
|
||||
#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
|
||||
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
|
||||
|
||||
|
|
|
@ -428,6 +428,9 @@
|
|||
#ifndef cpu_scache_line_size
|
||||
#define cpu_scache_line_size() cpu_data[0].scache.linesz
|
||||
#endif
|
||||
#ifndef cpu_tcache_line_size
|
||||
#define cpu_tcache_line_size() cpu_data[0].tcache.linesz
|
||||
#endif
|
||||
|
||||
#ifndef cpu_hwrena_impl_bits
|
||||
#define cpu_hwrena_impl_bits 0
|
||||
|
|
|
@ -33,6 +33,10 @@
|
|||
#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
|
||||
#define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
|
||||
#define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
|
||||
#define CVMX_L2C_ERR_TDTX(block_id) \
|
||||
(CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull)
|
||||
#define CVMX_L2C_ERR_TTGX(block_id) \
|
||||
(CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull)
|
||||
#define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
|
||||
#define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
|
||||
#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
|
||||
|
@ -66,9 +70,40 @@
|
|||
((offset) & 1) * 8)
|
||||
#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + \
|
||||
((offset) & 31) * 8)
|
||||
#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
|
||||
|
||||
|
||||
union cvmx_l2c_err_tdtx {
|
||||
uint64_t u64;
|
||||
struct cvmx_l2c_err_tdtx_s {
|
||||
__BITFIELD_FIELD(uint64_t dbe:1,
|
||||
__BITFIELD_FIELD(uint64_t sbe:1,
|
||||
__BITFIELD_FIELD(uint64_t vdbe:1,
|
||||
__BITFIELD_FIELD(uint64_t vsbe:1,
|
||||
__BITFIELD_FIELD(uint64_t syn:10,
|
||||
__BITFIELD_FIELD(uint64_t reserved_22_49:28,
|
||||
__BITFIELD_FIELD(uint64_t wayidx:18,
|
||||
__BITFIELD_FIELD(uint64_t reserved_2_3:2,
|
||||
__BITFIELD_FIELD(uint64_t type:2,
|
||||
;)))))))))
|
||||
} s;
|
||||
};
|
||||
|
||||
union cvmx_l2c_err_ttgx {
|
||||
uint64_t u64;
|
||||
struct cvmx_l2c_err_ttgx_s {
|
||||
__BITFIELD_FIELD(uint64_t dbe:1,
|
||||
__BITFIELD_FIELD(uint64_t sbe:1,
|
||||
__BITFIELD_FIELD(uint64_t noway:1,
|
||||
__BITFIELD_FIELD(uint64_t reserved_56_60:5,
|
||||
__BITFIELD_FIELD(uint64_t syn:6,
|
||||
__BITFIELD_FIELD(uint64_t reserved_22_49:28,
|
||||
__BITFIELD_FIELD(uint64_t wayidx:15,
|
||||
__BITFIELD_FIELD(uint64_t reserved_2_6:5,
|
||||
__BITFIELD_FIELD(uint64_t type:2,
|
||||
;)))))))))
|
||||
} s;
|
||||
};
|
||||
|
||||
union cvmx_l2c_cfg {
|
||||
uint64_t u64;
|
||||
struct cvmx_l2c_cfg_s {
|
||||
|
|
|
@ -0,0 +1,60 @@
|
|||
/***********************license start***************
|
||||
* Author: Cavium Networks
|
||||
*
|
||||
* Contact: support@caviumnetworks.com
|
||||
* This file is part of the OCTEON SDK
|
||||
*
|
||||
* Copyright (c) 2003-2017 Cavium, Inc.
|
||||
*
|
||||
* This file is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License, Version 2, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This file is distributed in the hope that it will be useful, but
|
||||
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
|
||||
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
|
||||
* NONINFRINGEMENT. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this file; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
* or visit http://www.gnu.org/licenses/.
|
||||
*
|
||||
* This file may also be available under a different license from Cavium.
|
||||
* Contact Cavium Networks for more information
|
||||
***********************license end**************************************/
|
||||
|
||||
#ifndef __CVMX_L2D_DEFS_H__
|
||||
#define __CVMX_L2D_DEFS_H__
|
||||
|
||||
#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull))
|
||||
#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
|
||||
|
||||
|
||||
union cvmx_l2d_err {
|
||||
uint64_t u64;
|
||||
struct cvmx_l2d_err_s {
|
||||
__BITFIELD_FIELD(uint64_t reserved_6_63:58,
|
||||
__BITFIELD_FIELD(uint64_t bmhclsel:1,
|
||||
__BITFIELD_FIELD(uint64_t ded_err:1,
|
||||
__BITFIELD_FIELD(uint64_t sec_err:1,
|
||||
__BITFIELD_FIELD(uint64_t ded_intena:1,
|
||||
__BITFIELD_FIELD(uint64_t sec_intena:1,
|
||||
__BITFIELD_FIELD(uint64_t ecc_ena:1,
|
||||
;)))))))
|
||||
} s;
|
||||
};
|
||||
|
||||
union cvmx_l2d_fus3 {
|
||||
uint64_t u64;
|
||||
struct cvmx_l2d_fus3_s {
|
||||
__BITFIELD_FIELD(uint64_t reserved_40_63:24,
|
||||
__BITFIELD_FIELD(uint64_t ema_ctl:3,
|
||||
__BITFIELD_FIELD(uint64_t reserved_34_36:3,
|
||||
__BITFIELD_FIELD(uint64_t q3fus:34,
|
||||
;))))
|
||||
} s;
|
||||
};
|
||||
|
||||
#endif
|
|
@ -62,6 +62,7 @@ enum cvmx_mips_space {
|
|||
#include <asm/octeon/cvmx-iob-defs.h>
|
||||
#include <asm/octeon/cvmx-ipd-defs.h>
|
||||
#include <asm/octeon/cvmx-l2c-defs.h>
|
||||
#include <asm/octeon/cvmx-l2d-defs.h>
|
||||
#include <asm/octeon/cvmx-l2t-defs.h>
|
||||
#include <asm/octeon/cvmx-led-defs.h>
|
||||
#include <asm/octeon/cvmx-mio-defs.h>
|
||||
|
|
|
@ -376,9 +376,6 @@ asmlinkage void start_secondary(void)
|
|||
cpumask_set_cpu(cpu, &cpu_coherent_mask);
|
||||
notify_cpu_starting(cpu);
|
||||
|
||||
complete(&cpu_running);
|
||||
synchronise_count_slave(cpu);
|
||||
|
||||
set_cpu_online(cpu, true);
|
||||
|
||||
set_cpu_sibling_map(cpu);
|
||||
|
@ -386,6 +383,9 @@ asmlinkage void start_secondary(void)
|
|||
|
||||
calculate_cpu_foreign_map();
|
||||
|
||||
complete(&cpu_running);
|
||||
synchronise_count_slave(cpu);
|
||||
|
||||
/*
|
||||
* irq will be enabled in ->smp_finish(), enabling it too early
|
||||
* is dangerous.
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
|
||||
#include "uasm.c"
|
||||
|
||||
static const struct insn const insn_table[insn_invalid] = {
|
||||
static const struct insn insn_table[insn_invalid] = {
|
||||
[insn_addiu] = {M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
|
||||
[insn_addu] = {M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD},
|
||||
[insn_and] = {M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD},
|
||||
|
|
File diff suppressed because it is too large
@ -28,16 +28,15 @@ EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
|
|||
|
||||
static int __init pcibios_set_cache_line_size(void)
|
||||
{
|
||||
struct cpuinfo_mips *c = ¤t_cpu_data;
|
||||
unsigned int lsize;
|
||||
|
||||
/*
|
||||
* Set PCI cacheline size to that of the highest level in the
|
||||
* cache hierarchy.
|
||||
*/
|
||||
lsize = c->dcache.linesz;
|
||||
lsize = c->scache.linesz ? : lsize;
|
||||
lsize = c->tcache.linesz ? : lsize;
|
||||
lsize = cpu_dcache_line_size();
|
||||
lsize = cpu_scache_line_size() ? : lsize;
|
||||
lsize = cpu_tcache_line_size() ? : lsize;
|
||||
|
||||
BUG_ON(!lsize);
|
||||
|
||||
|
|
|
@ -35,7 +35,8 @@ static __always_inline long gettimeofday_fallback(struct timeval *_tv,
|
|||
" syscall\n"
|
||||
: "=r" (ret), "=r" (error)
|
||||
: "r" (tv), "r" (tz), "r" (nr)
|
||||
: "memory");
|
||||
: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
|
||||
"$14", "$15", "$24", "$25", "hi", "lo", "memory");
|
||||
|
||||
return error ? -ret : ret;
|
||||
}
|
||||
|
@ -55,7 +56,8 @@ static __always_inline long clock_gettime_fallback(clockid_t _clkid,
|
|||
" syscall\n"
|
||||
: "=r" (ret), "=r" (error)
|
||||
: "r" (clkid), "r" (ts), "r" (nr)
|
||||
: "memory");
|
||||
: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
|
||||
"$14", "$15", "$24", "$25", "hi", "lo", "memory");
|
||||
|
||||
return error ? -ret : ret;
|
||||
}
|
||||
|
|
|
@ -199,7 +199,7 @@ config PPC
|
|||
select HAVE_OPTPROBES if PPC64
|
||||
select HAVE_PERF_EVENTS
|
||||
select HAVE_PERF_EVENTS_NMI if PPC64
|
||||
select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
|
||||
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
|
||||
select HAVE_PERF_REGS
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
select HAVE_RCU_TABLE_FREE if SMP
|
||||
|
|
|
@ -293,7 +293,8 @@ CONFIG_MAGIC_SYSRQ=y
|
|||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_DEBUG_STACK_USAGE=y
|
||||
CONFIG_DEBUG_STACKOVERFLOW=y
|
||||
CONFIG_LOCKUP_DETECTOR=y
|
||||
CONFIG_SOFTLOCKUP_DETECTOR=y
|
||||
CONFIG_HARDLOCKUP_DETECTOR=y
|
||||
CONFIG_LATENCYTOP=y
|
||||
CONFIG_SCHED_TRACER=y
|
||||
CONFIG_BLK_DEV_IO_TRACE=y
|
||||
|
|
|
@ -324,7 +324,8 @@ CONFIG_MAGIC_SYSRQ=y
|
|||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_DEBUG_STACK_USAGE=y
|
||||
CONFIG_DEBUG_STACKOVERFLOW=y
|
||||
CONFIG_LOCKUP_DETECTOR=y
|
||||
CONFIG_SOFTLOCKUP_DETECTOR=y
|
||||
CONFIG_HARDLOCKUP_DETECTOR=y
|
||||
CONFIG_DEBUG_MUTEXES=y
|
||||
CONFIG_LATENCYTOP=y
|
||||
CONFIG_SCHED_TRACER=y
|
||||
|
|
|
@ -291,7 +291,8 @@ CONFIG_MAGIC_SYSRQ=y
|
|||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_DEBUG_STACK_USAGE=y
|
||||
CONFIG_DEBUG_STACKOVERFLOW=y
|
||||
CONFIG_LOCKUP_DETECTOR=y
|
||||
CONFIG_SOFTLOCKUP_DETECTOR=y
|
||||
CONFIG_HARDLOCKUP_DETECTOR=y
|
||||
CONFIG_LATENCYTOP=y
|
||||
CONFIG_SCHED_TRACER=y
|
||||
CONFIG_BLK_DEV_IO_TRACE=y
|
||||
|
|
|
@ -223,17 +223,27 @@ system_call_exit:
|
|||
andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
|
||||
bne- .Lsyscall_exit_work
|
||||
|
||||
/* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
|
||||
li r7,MSR_FP
|
||||
andi. r0,r8,MSR_FP
|
||||
beq 2f
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
oris r7,r7,MSR_VEC@h
|
||||
andis. r0,r8,MSR_VEC@h
|
||||
bne 3f
|
||||
#endif
|
||||
and r0,r8,r7
|
||||
cmpd r0,r7
|
||||
bne .Lsyscall_restore_math
|
||||
.Lsyscall_restore_math_cont:
|
||||
2: addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
#ifdef CONFIG_PPC_BOOK3S
|
||||
li r10,MSR_RI
|
||||
mtmsrd r10,1 /* Restore RI */
|
||||
#endif
|
||||
bl restore_math
|
||||
#ifdef CONFIG_PPC_BOOK3S
|
||||
li r11,0
|
||||
mtmsrd r11,1
|
||||
#endif
|
||||
ld r8,_MSR(r1)
|
||||
ld r3,RESULT(r1)
|
||||
li r11,-MAX_ERRNO
|
||||
|
||||
cmpld r3,r11
|
||||
3: cmpld r3,r11
|
||||
ld r5,_CCR(r1)
|
||||
bge- .Lsyscall_error
|
||||
.Lsyscall_error_cont:
|
||||
|
@ -267,40 +277,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
|
|||
std r5,_CCR(r1)
|
||||
b .Lsyscall_error_cont
|
||||
|
||||
.Lsyscall_restore_math:
|
||||
/*
|
||||
* Some initial tests from restore_math to avoid the heavyweight
|
||||
* C code entry and MSR manipulations.
|
||||
*/
|
||||
LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
|
||||
and. r0,r0,r8
|
||||
bne 1f
|
||||
|
||||
ld r7,PACACURRENT(r13)
|
||||
lbz r0,THREAD+THREAD_LOAD_FP(r7)
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
lbz r6,THREAD+THREAD_LOAD_VEC(r7)
|
||||
add r0,r0,r6
|
||||
#endif
|
||||
cmpdi r0,0
|
||||
beq .Lsyscall_restore_math_cont
|
||||
|
||||
1: addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
#ifdef CONFIG_PPC_BOOK3S
|
||||
li r10,MSR_RI
|
||||
mtmsrd r10,1 /* Restore RI */
|
||||
#endif
|
||||
bl restore_math
|
||||
#ifdef CONFIG_PPC_BOOK3S
|
||||
li r11,0
|
||||
mtmsrd r11,1
|
||||
#endif
|
||||
/* Restore volatiles, reload MSR from updated one */
|
||||
ld r8,_MSR(r1)
|
||||
ld r3,RESULT(r1)
|
||||
li r11,-MAX_ERRNO
|
||||
b .Lsyscall_restore_math_cont
|
||||
|
||||
/* Traced system call support */
|
||||
.Lsyscall_dotrace:
|
||||
bl save_nvgprs
|
||||
|
|
|
@ -362,7 +362,8 @@ void enable_kernel_vsx(void)
|
|||
|
||||
cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
|
||||
|
||||
if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
|
||||
if (current->thread.regs &&
|
||||
(current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
|
||||
check_if_tm_restore_required(current);
|
||||
/*
|
||||
* If a thread has already been reclaimed then the
|
||||
|
@ -386,7 +387,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
|
|||
{
|
||||
if (tsk->thread.regs) {
|
||||
preempt_disable();
|
||||
if (tsk->thread.regs->msr & MSR_VSX) {
|
||||
if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
|
||||
BUG_ON(tsk != current);
|
||||
giveup_vsx(tsk);
|
||||
}
|
||||
|
@ -511,10 +512,6 @@ void restore_math(struct pt_regs *regs)
|
|||
{
|
||||
unsigned long msr;
|
||||
|
||||
/*
|
||||
* Syscall exit makes a similar initial check before branching
|
||||
* to restore_math. Keep them in synch.
|
||||
*/
|
||||
if (!msr_tm_active(regs->msr) &&
|
||||
!current->thread.load_fp && !loadvec(current->thread))
|
||||
return;
|
||||
|
|
|
@ -351,7 +351,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
|
|||
hard_irq_disable();
|
||||
while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
|
||||
raw_local_irq_restore(*flags);
|
||||
cpu_relax();
|
||||
spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
|
||||
raw_local_irq_save(*flags);
|
||||
hard_irq_disable();
|
||||
}
|
||||
|
@ -360,7 +360,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
|
|||
static void nmi_ipi_lock(void)
|
||||
{
|
||||
while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
|
||||
cpu_relax();
|
||||
spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
|
||||
}
|
||||
|
||||
static void nmi_ipi_unlock(void)
|
||||
|
@ -475,7 +475,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
|
|||
nmi_ipi_lock_start(&flags);
|
||||
while (nmi_ipi_busy_count) {
|
||||
nmi_ipi_unlock_end(&flags);
|
||||
cpu_relax();
|
||||
spin_until_cond(nmi_ipi_busy_count == 0);
|
||||
nmi_ipi_lock_start(&flags);
|
||||
}
|
||||
|
||||
|
|
|
@ -71,15 +71,20 @@ static inline void wd_smp_lock(unsigned long *flags)
|
|||
* This may be called from low level interrupt handlers at some
|
||||
* point in future.
|
||||
*/
|
||||
local_irq_save(*flags);
|
||||
while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock)))
|
||||
cpu_relax();
|
||||
raw_local_irq_save(*flags);
|
||||
hard_irq_disable(); /* Make it soft-NMI safe */
|
||||
while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
|
||||
raw_local_irq_restore(*flags);
|
||||
spin_until_cond(!test_bit(0, &__wd_smp_lock));
|
||||
raw_local_irq_save(*flags);
|
||||
hard_irq_disable();
|
||||
}
|
||||
}
|
||||
|
||||
static inline void wd_smp_unlock(unsigned long *flags)
|
||||
{
|
||||
clear_bit_unlock(0, &__wd_smp_lock);
|
||||
local_irq_restore(*flags);
|
||||
raw_local_irq_restore(*flags);
|
||||
}
|
||||
|
||||
static void wd_lockup_ipi(struct pt_regs *regs)
|
||||
|
@ -96,10 +101,10 @@ static void wd_lockup_ipi(struct pt_regs *regs)
|
|||
nmi_panic(regs, "Hard LOCKUP");
|
||||
}
|
||||
|
||||
static void set_cpu_stuck(int cpu, u64 tb)
|
||||
static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
|
||||
{
|
||||
cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
|
||||
cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
|
||||
cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
|
||||
cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
|
||||
if (cpumask_empty(&wd_smp_cpus_pending)) {
|
||||
wd_smp_last_reset_tb = tb;
|
||||
cpumask_andnot(&wd_smp_cpus_pending,
|
||||
|
@ -107,6 +112,10 @@ static void set_cpu_stuck(int cpu, u64 tb)
|
|||
&wd_smp_cpus_stuck);
|
||||
}
|
||||
}
|
||||
static void set_cpu_stuck(int cpu, u64 tb)
|
||||
{
|
||||
set_cpumask_stuck(cpumask_of(cpu), tb);
|
||||
}
|
||||
|
||||
static void watchdog_smp_panic(int cpu, u64 tb)
|
||||
{
|
||||
|
@ -135,11 +144,9 @@ static void watchdog_smp_panic(int cpu, u64 tb)
|
|||
}
|
||||
smp_flush_nmi_ipi(1000000);
|
||||
|
||||
/* Take the stuck CPU out of the watch group */
|
||||
for_each_cpu(c, &wd_smp_cpus_pending)
|
||||
set_cpu_stuck(c, tb);
|
||||
/* Take the stuck CPUs out of the watch group */
|
||||
set_cpumask_stuck(&wd_smp_cpus_pending, tb);
|
||||
|
||||
out:
|
||||
wd_smp_unlock(&flags);
|
||||
|
||||
printk_safe_flush();
|
||||
|
@ -152,6 +159,11 @@ out:
|
|||
|
||||
if (hardlockup_panic)
|
||||
nmi_panic(NULL, "Hard LOCKUP");
|
||||
|
||||
return;
|
||||
|
||||
out:
|
||||
wd_smp_unlock(&flags);
|
||||
}
|
||||
|
||||
static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
|
||||
|
@ -258,9 +270,11 @@ static void wd_timer_fn(unsigned long data)
|
|||
|
||||
void arch_touch_nmi_watchdog(void)
|
||||
{
|
||||
unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
watchdog_timer_interrupt(cpu);
|
||||
if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks)
|
||||
watchdog_timer_interrupt(cpu);
|
||||
}
|
||||
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
|
||||
|
||||
|
@ -283,6 +297,8 @@ static void stop_watchdog_timer_on(unsigned int cpu)
|
|||
|
||||
static int start_wd_on_cpu(unsigned int cpu)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
|
||||
WARN_ON(1);
|
||||
return 0;
|
||||
|
@ -297,12 +313,14 @@ static int start_wd_on_cpu(unsigned int cpu)
|
|||
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
|
||||
return 0;
|
||||
|
||||
wd_smp_lock(&flags);
|
||||
cpumask_set_cpu(cpu, &wd_cpus_enabled);
|
||||
if (cpumask_weight(&wd_cpus_enabled) == 1) {
|
||||
cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
|
||||
wd_smp_last_reset_tb = get_tb();
|
||||
}
|
||||
smp_wmb();
|
||||
wd_smp_unlock(&flags);
|
||||
|
||||
start_watchdog_timer_on(cpu);
|
||||
|
||||
return 0;
|
||||
|
@ -310,12 +328,17 @@ static int start_wd_on_cpu(unsigned int cpu)
|
|||
|
||||
static int stop_wd_on_cpu(unsigned int cpu)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
|
||||
return 0; /* Can happen in CPU unplug case */
|
||||
|
||||
stop_watchdog_timer_on(cpu);
|
||||
|
||||
wd_smp_lock(&flags);
|
||||
cpumask_clear_cpu(cpu, &wd_cpus_enabled);
|
||||
wd_smp_unlock(&flags);
|
||||
|
||||
wd_smp_clear_cpu_pending(cpu, get_tb());
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -265,8 +265,11 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));

@ -294,6 +297,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	unsigned long npages, size;
	int ret = -ENOMEM;
	int i;

@ -301,25 +305,17 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
	if (!args->size)
		return -EINVAL;

	/* Check this LIOBN hasn't been previously allocated */
	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt->liobn == args->liobn)
			return -EBUSY;
	}

	size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
	npages = kvmppc_tce_pages(size);
	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
	if (ret) {
		stt = NULL;
		goto fail;
	}
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail;
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;

@ -334,24 +330,39 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
			goto fail;
	}

	kvm_get_kvm(kvm);

	mutex_lock(&kvm->lock);
	list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0) {
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
		kvm_get_kvm(kvm);
	}

	mutex_unlock(&kvm->lock);

	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
			stt, O_RDWR | O_CLOEXEC);
	if (ret >= 0)
		return ret;

fail:
	if (stt) {
		for (i = 0; i < npages; i++)
			if (stt->pages[i])
				__free_page(stt->pages[i]);
fail:
	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

		kfree(stt);
	}
	kfree(stt);
fail_acct:
	kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
	return ret;
}
@ -1298,6 +1298,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
|
|||
/* Hypervisor doorbell - exit only if host IPI flag set */
|
||||
cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
|
||||
bne 3f
|
||||
BEGIN_FTR_SECTION
|
||||
PPC_MSGSYNC
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
|
||||
lbz r0, HSTATE_HOST_IPI(r13)
|
||||
cmpwi r0, 0
|
||||
beq 4f
|
||||
|
|
|
@ -16,7 +16,22 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
|
|||
u8 cppr;
|
||||
u16 ack;
|
||||
|
||||
/* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */
|
||||
/*
|
||||
* Ensure any previous store to CPPR is ordered vs.
|
||||
* the subsequent loads from PIPR or ACK.
|
||||
*/
|
||||
eieio();
|
||||
|
||||
/*
|
||||
* DD1 bug workaround: If PIPR is less favored than CPPR
|
||||
* ignore the interrupt or we might incorrectly lose an IPB
|
||||
* bit.
|
||||
*/
|
||||
if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
|
||||
u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
|
||||
if (pipr >= xc->hw_cppr)
|
||||
return;
|
||||
}
|
||||
|
||||
/* Perform the acknowledge OS to register cycle. */
|
||||
ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
|
||||
|
@ -235,6 +250,11 @@ skip_ipi:
|
|||
/*
|
||||
* If we found an interrupt, adjust what the guest CPPR should
|
||||
* be as if we had just fetched that interrupt from HW.
|
||||
*
|
||||
* Note: This can only make xc->cppr smaller as the previous
|
||||
* loop will only exit with hirq != 0 if prio is lower than
|
||||
* the current xc->cppr. Thus we don't need to re-check xc->mfrr
|
||||
* for pending IPIs.
|
||||
*/
|
||||
if (hirq)
|
||||
xc->cppr = prio;
|
||||
|
@ -380,6 +400,12 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
|
|||
old_cppr = xc->cppr;
|
||||
xc->cppr = cppr;
|
||||
|
||||
/*
|
||||
* Order the above update of xc->cppr with the subsequent
|
||||
* read of xc->mfrr inside push_pending_to_hw()
|
||||
*/
|
||||
smp_mb();
|
||||
|
||||
/*
|
||||
* We are masking less, we need to look for pending things
|
||||
* to deliver and set VP pending bits accordingly to trigger
|
||||
|
@ -420,21 +446,37 @@ X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
|
|||
* used to signal MFRR changes is EOId when fetched from
|
||||
* the queue.
|
||||
*/
|
||||
if (irq == XICS_IPI || irq == 0)
|
||||
if (irq == XICS_IPI || irq == 0) {
|
||||
/*
|
||||
* This barrier orders the setting of xc->cppr vs.
|
||||
* subsquent test of xc->mfrr done inside
|
||||
* scan_interrupts and push_pending_to_hw
|
||||
*/
|
||||
smp_mb();
|
||||
goto bail;
|
||||
}
|
||||
|
||||
/* Find interrupt source */
|
||||
sb = kvmppc_xive_find_source(xive, irq, &src);
|
||||
if (!sb) {
|
||||
pr_devel(" source not found !\n");
|
||||
rc = H_PARAMETER;
|
||||
/* Same as above */
|
||||
smp_mb();
|
||||
goto bail;
|
||||
}
|
||||
state = &sb->irq_state[src];
|
||||
kvmppc_xive_select_irq(state, &hw_num, &xd);
|
||||
|
||||
state->in_eoi = true;
|
||||
mb();
|
||||
|
||||
/*
|
||||
* This barrier orders both setting of in_eoi above vs,
|
||||
* subsequent test of guest_priority, and the setting
|
||||
* of xc->cppr vs. subsquent test of xc->mfrr done inside
|
||||
* scan_interrupts and push_pending_to_hw
|
||||
*/
|
||||
smp_mb();
|
||||
|
||||
again:
|
||||
if (state->guest_priority == MASKED) {
|
||||
|
@ -461,6 +503,14 @@ again:
|
|||
|
||||
}
|
||||
|
||||
/*
|
||||
* This barrier orders the above guest_priority check
|
||||
* and spin_lock/unlock with clearing in_eoi below.
|
||||
*
|
||||
* It also has to be a full mb() as it must ensure
|
||||
* the MMIOs done in source_eoi() are completed before
|
||||
* state->in_eoi is visible.
|
||||
*/
|
||||
mb();
|
||||
state->in_eoi = false;
|
||||
bail:
|
||||
|
@ -495,6 +545,18 @@ X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
|
|||
/* Locklessly write over MFRR */
|
||||
xc->mfrr = mfrr;
|
||||
|
||||
/*
|
||||
* The load of xc->cppr below and the subsequent MMIO store
|
||||
* to the IPI must happen after the above mfrr update is
|
||||
* globally visible so that:
|
||||
*
|
||||
* - Synchronize with another CPU doing an H_EOI or a H_CPPR
|
||||
* updating xc->cppr then reading xc->mfrr.
|
||||
*
|
||||
* - The target of the IPI sees the xc->mfrr update
|
||||
*/
|
||||
mb();
|
||||
|
||||
/* Shoot the IPI if most favored than target cppr */
|
||||
if (mfrr < xc->cppr)
|
||||
__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));
|
||||
|
|
|
@ -56,6 +56,7 @@ u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
|
|||
*/
|
||||
static u64 pnv_deepest_stop_psscr_val;
|
||||
static u64 pnv_deepest_stop_psscr_mask;
|
||||
static u64 pnv_deepest_stop_flag;
|
||||
static bool deepest_stop_found;
|
||||
|
||||
static int pnv_save_sprs_for_deep_states(void)
|
||||
|
@ -185,8 +186,40 @@ static void pnv_alloc_idle_core_states(void)
|
|||
|
||||
update_subcore_sibling_mask();
|
||||
|
||||
if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
|
||||
pnv_save_sprs_for_deep_states();
|
||||
if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
|
||||
int rc = pnv_save_sprs_for_deep_states();
|
||||
|
||||
if (likely(!rc))
|
||||
return;
|
||||
|
||||
/*
|
||||
* The stop-api is unable to restore hypervisor
|
||||
* resources on wakeup from platform idle states which
|
||||
* lose full context. So disable such states.
|
||||
*/
|
||||
supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
|
||||
pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
|
||||
pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
|
||||
|
||||
if (cpu_has_feature(CPU_FTR_ARCH_300) &&
|
||||
(pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
|
||||
/*
|
||||
* Use the default stop state for CPU-Hotplug
|
||||
* if available.
|
||||
*/
|
||||
if (default_stop_found) {
|
||||
pnv_deepest_stop_psscr_val =
|
||||
pnv_default_stop_val;
|
||||
pnv_deepest_stop_psscr_mask =
|
||||
pnv_default_stop_mask;
|
||||
pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
|
||||
pnv_deepest_stop_psscr_val);
|
||||
} else { /* Fallback to snooze loop for CPU-Hotplug */
|
||||
deepest_stop_found = false;
|
||||
pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
u32 pnv_get_supported_cpuidle_states(void)
|
||||
|
@ -375,7 +408,8 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
|
|||
pnv_deepest_stop_psscr_val;
|
||||
srr1 = power9_idle_stop(psscr);
|
||||
|
||||
} else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
|
||||
} else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
|
||||
(idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
|
||||
srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
|
||||
} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
|
||||
(idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
|
||||
|
@ -553,6 +587,7 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
|
|||
max_residency_ns = residency_ns[i];
|
||||
pnv_deepest_stop_psscr_val = psscr_val[i];
|
||||
pnv_deepest_stop_psscr_mask = psscr_mask[i];
|
||||
pnv_deepest_stop_flag = flags[i];
|
||||
deepest_stop_found = true;
|
||||
}
|
||||
|
||||
|
|
|
@ -47,10 +47,9 @@ struct mmu_table_batch {
|
|||
extern void tlb_table_flush(struct mmu_gather *tlb);
|
||||
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
|
||||
|
||||
static inline void tlb_gather_mmu(struct mmu_gather *tlb,
|
||||
struct mm_struct *mm,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
static inline void
|
||||
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
tlb->mm = mm;
|
||||
tlb->start = start;
|
||||
|
@ -76,9 +75,15 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
|
|||
tlb_flush_mmu_free(tlb);
|
||||
}
|
||||
|
||||
static inline void tlb_finish_mmu(struct mmu_gather *tlb,
|
||||
unsigned long start, unsigned long end)
|
||||
static inline void
|
||||
arch_tlb_finish_mmu(struct mmu_gather *tlb,
|
||||
unsigned long start, unsigned long end, bool force)
|
||||
{
|
||||
if (force) {
|
||||
tlb->start = start;
|
||||
tlb->end = end;
|
||||
}
|
||||
|
||||
tlb_flush_mmu(tlb);
|
||||
}
|
||||
|
||||
|
|
|
@ -1253,7 +1253,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
|
|||
insn_count = bpf_jit_insn(jit, fp, i);
|
||||
if (insn_count < 0)
|
||||
return -1;
|
||||
jit->addrs[i + 1] = jit->prg; /* Next instruction address */
|
||||
/* Next instruction address */
|
||||
jit->addrs[i + insn_count] = jit->prg;
|
||||
}
|
||||
bpf_jit_epilogue(jit);
|
||||
|
||||
|
|
|
@ -36,7 +36,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
|
|||
}
|
||||
|
||||
static inline void
|
||||
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
|
||||
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
tlb->mm = mm;
|
||||
tlb->start = start;
|
||||
|
@ -47,9 +48,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
|
|||
}
|
||||
|
||||
static inline void
|
||||
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
|
||||
arch_tlb_finish_mmu(struct mmu_gather *tlb,
|
||||
unsigned long start, unsigned long end, bool force)
|
||||
{
|
||||
if (tlb->fullmm)
|
||||
if (tlb->fullmm || force)
|
||||
flush_tlb_mm(tlb->mm);
|
||||
|
||||
/* keep the page table cache within bounds */
|
||||
|
|
|
@ -47,10 +47,26 @@
|
|||
#define SUN4V_CHIP_NIAGARA5 0x05
|
||||
#define SUN4V_CHIP_SPARC_M6 0x06
|
||||
#define SUN4V_CHIP_SPARC_M7 0x07
|
||||
#define SUN4V_CHIP_SPARC_M8 0x08
|
||||
#define SUN4V_CHIP_SPARC64X 0x8a
|
||||
#define SUN4V_CHIP_SPARC_SN 0x8b
|
||||
#define SUN4V_CHIP_UNKNOWN 0xff
|
||||
|
||||
/*
|
||||
* The following CPU_ID_xxx constants are used
|
||||
* to identify the CPU type in the setup phase
|
||||
* (see head_64.S)
|
||||
*/
|
||||
#define CPU_ID_NIAGARA1 ('1')
|
||||
#define CPU_ID_NIAGARA2 ('2')
|
||||
#define CPU_ID_NIAGARA3 ('3')
|
||||
#define CPU_ID_NIAGARA4 ('4')
|
||||
#define CPU_ID_NIAGARA5 ('5')
|
||||
#define CPU_ID_M6 ('6')
|
||||
#define CPU_ID_M7 ('7')
|
||||
#define CPU_ID_M8 ('8')
|
||||
#define CPU_ID_SONOMA1 ('N')
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
enum ultra_tlb_layout {
|
||||
|
|
|
@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
|
|||
sparc_pmu_type = "sparc-m7";
|
||||
break;
|
||||
|
||||
case SUN4V_CHIP_SPARC_M8:
|
||||
sparc_cpu_type = "SPARC-M8";
|
||||
sparc_fpu_type = "SPARC-M8 integrated FPU";
|
||||
sparc_pmu_type = "sparc-m8";
|
||||
break;
|
||||
|
||||
case SUN4V_CHIP_SPARC_SN:
|
||||
sparc_cpu_type = "SPARC-SN";
|
||||
sparc_fpu_type = "SPARC-SN integrated FPU";
|
||||
|
|
|
@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
|
|||
case SUN4V_CHIP_NIAGARA5:
|
||||
case SUN4V_CHIP_SPARC_M6:
|
||||
case SUN4V_CHIP_SPARC_M7:
|
||||
case SUN4V_CHIP_SPARC_M8:
|
||||
case SUN4V_CHIP_SPARC_SN:
|
||||
case SUN4V_CHIP_SPARC64X:
|
||||
rover_inc_table = niagara_iterate_method;
|
||||
|
|
|
@ -424,22 +424,25 @@ EXPORT_SYMBOL(sun4v_chip_type)
|
|||
nop
|
||||
|
||||
70: ldub [%g1 + 7], %g2
|
||||
cmp %g2, '3'
|
||||
cmp %g2, CPU_ID_NIAGARA3
|
||||
be,pt %xcc, 5f
|
||||
mov SUN4V_CHIP_NIAGARA3, %g4
|
||||
cmp %g2, '4'
|
||||
cmp %g2, CPU_ID_NIAGARA4
|
||||
be,pt %xcc, 5f
|
||||
mov SUN4V_CHIP_NIAGARA4, %g4
|
||||
cmp %g2, '5'
|
||||
cmp %g2, CPU_ID_NIAGARA5
|
||||
be,pt %xcc, 5f
|
||||
mov SUN4V_CHIP_NIAGARA5, %g4
|
||||
cmp %g2, '6'
|
||||
cmp %g2, CPU_ID_M6
|
||||
be,pt %xcc, 5f
|
||||
mov SUN4V_CHIP_SPARC_M6, %g4
|
||||
cmp %g2, '7'
|
||||
cmp %g2, CPU_ID_M7
|
||||
be,pt %xcc, 5f
|
||||
mov SUN4V_CHIP_SPARC_M7, %g4
|
||||
cmp %g2, 'N'
|
||||
cmp %g2, CPU_ID_M8
|
||||
be,pt %xcc, 5f
|
||||
mov SUN4V_CHIP_SPARC_M8, %g4
|
||||
cmp %g2, CPU_ID_SONOMA1
|
||||
be,pt %xcc, 5f
|
||||
mov SUN4V_CHIP_SPARC_SN, %g4
|
||||
ba,pt %xcc, 49f
|
||||
|
@ -448,10 +451,10 @@ EXPORT_SYMBOL(sun4v_chip_type)
|
|||
91: sethi %hi(prom_cpu_compatible), %g1
|
||||
or %g1, %lo(prom_cpu_compatible), %g1
|
||||
ldub [%g1 + 17], %g2
|
||||
cmp %g2, '1'
|
||||
cmp %g2, CPU_ID_NIAGARA1
|
||||
be,pt %xcc, 5f
|
||||
mov SUN4V_CHIP_NIAGARA1, %g4
|
||||
cmp %g2, '2'
|
||||
cmp %g2, CPU_ID_NIAGARA2
|
||||
be,pt %xcc, 5f
|
||||
mov SUN4V_CHIP_NIAGARA2, %g4
|
||||
|
||||
|
@ -600,6 +603,9 @@ niagara_tlb_fixup:
|
|||
be,pt %xcc, niagara4_patch
|
||||
nop
|
||||
cmp %g1, SUN4V_CHIP_SPARC_M7
|
||||
be,pt %xcc, niagara4_patch
|
||||
nop
|
||||
cmp %g1, SUN4V_CHIP_SPARC_M8
|
||||
be,pt %xcc, niagara4_patch
|
||||
nop
|
||||
cmp %g1, SUN4V_CHIP_SPARC_SN
|
||||
|
|
|
@ -288,10 +288,17 @@ static void __init sun4v_patch(void)
|
|||
|
||||
sun4v_patch_2insn_range(&__sun4v_2insn_patch,
|
||||
&__sun4v_2insn_patch_end);
|
||||
if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
|
||||
|
||||
switch (sun4v_chip_type) {
|
||||
case SUN4V_CHIP_SPARC_M7:
|
||||
case SUN4V_CHIP_SPARC_M8:
|
||||
case SUN4V_CHIP_SPARC_SN:
|
||||
sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
|
||||
&__sun_m7_2insn_patch_end);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
sun4v_hvapi_init();
|
||||
}
|
||||
|
@ -529,6 +536,7 @@ static void __init init_sparc64_elf_hwcap(void)
|
|||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
|
||||
cap |= HWCAP_SPARC_BLKINIT;
|
||||
|
@ -538,6 +546,7 @@ static void __init init_sparc64_elf_hwcap(void)
|
|||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
|
||||
cap |= HWCAP_SPARC_N2;
|
||||
|
@ -568,6 +577,7 @@ static void __init init_sparc64_elf_hwcap(void)
|
|||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
|
||||
cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
|
||||
|
@ -578,6 +588,7 @@ static void __init init_sparc64_elf_hwcap(void)
|
|||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
|
||||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
|
||||
cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
|
||||
|
|
|
@ -1944,12 +1944,22 @@ static void __init setup_page_offset(void)
|
|||
break;
|
||||
case SUN4V_CHIP_SPARC_M7:
|
||||
case SUN4V_CHIP_SPARC_SN:
|
||||
default:
|
||||
/* M7 and later support 52-bit virtual addresses. */
|
||||
sparc64_va_hole_top = 0xfff8000000000000UL;
|
||||
sparc64_va_hole_bottom = 0x0008000000000000UL;
|
||||
max_phys_bits = 49;
|
||||
break;
|
||||
case SUN4V_CHIP_SPARC_M8:
|
||||
default:
|
||||
/* M8 and later support 54-bit virtual addresses.
|
||||
* However, restricting M8 and above VA bits to 53
|
||||
* as 4-level page table cannot support more than
|
||||
* 53 VA bits.
|
||||
*/
|
||||
sparc64_va_hole_top = 0xfff0000000000000UL;
|
||||
sparc64_va_hole_bottom = 0x0010000000000000UL;
|
||||
max_phys_bits = 51;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2161,6 +2171,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
|
|||
*/
|
||||
switch (sun4v_chip_type) {
|
||||
case SUN4V_CHIP_SPARC_M7:
|
||||
case SUN4V_CHIP_SPARC_M8:
|
||||
case SUN4V_CHIP_SPARC_SN:
|
||||
pagecv_flag = 0x00;
|
||||
break;
|
||||
|
@ -2313,6 +2324,7 @@ void __init paging_init(void)
|
|||
*/
|
||||
switch (sun4v_chip_type) {
|
||||
case SUN4V_CHIP_SPARC_M7:
|
||||
case SUN4V_CHIP_SPARC_M8:
|
||||
case SUN4V_CHIP_SPARC_SN:
|
||||
page_cache4v_flag = _PAGE_CP_4V;
|
||||
break;
|
||||
|
|
|
@ -45,7 +45,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
|
|||
}
|
||||
|
||||
static inline void
|
||||
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
|
||||
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
tlb->mm = mm;
|
||||
tlb->start = start;
|
||||
|
@ -80,13 +81,19 @@ tlb_flush_mmu(struct mmu_gather *tlb)
|
|||
tlb_flush_mmu_free(tlb);
|
||||
}
|
||||
|
||||
/* tlb_finish_mmu
|
||||
/* arch_tlb_finish_mmu
|
||||
* Called at the end of the shootdown operation to free up any resources
|
||||
* that were required.
|
||||
*/
|
||||
static inline void
|
||||
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
|
||||
arch_tlb_finish_mmu(struct mmu_gather *tlb,
|
||||
unsigned long start, unsigned long end, bool force)
|
||||
{
|
||||
if (force) {
|
||||
tlb->start = start;
|
||||
tlb->end = end;
|
||||
tlb->need_flush = 1;
|
||||
}
|
||||
tlb_flush_mmu(tlb);
|
||||
|
||||
/* keep the page table cache within bounds */
|
||||
|
|
|
@ -100,6 +100,7 @@ config X86
|
|||
select GENERIC_STRNCPY_FROM_USER
|
||||
select GENERIC_STRNLEN_USER
|
||||
select GENERIC_TIME_VSYSCALL
|
||||
select HARDLOCKUP_CHECK_TIMESTAMP if X86_64
|
||||
select HAVE_ACPI_APEI if ACPI
|
||||
select HAVE_ACPI_APEI_NMI if ACPI
|
||||
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
|
||||
|
@ -163,7 +164,7 @@ config X86
|
|||
select HAVE_PCSPKR_PLATFORM
|
||||
select HAVE_PERF_EVENTS
|
||||
select HAVE_PERF_EVENTS_NMI
|
||||
select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI
|
||||
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
|
||||
select HAVE_PERF_REGS
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
|
|
|
@ -117,11 +117,10 @@
|
|||
.set T1, REG_T1
|
||||
.endm
|
||||
|
||||
#define K_BASE %r8
|
||||
#define HASH_PTR %r9
|
||||
#define BLOCKS_CTR %r8
|
||||
#define BUFFER_PTR %r10
|
||||
#define BUFFER_PTR2 %r13
|
||||
#define BUFFER_END %r11
|
||||
|
||||
#define PRECALC_BUF %r14
|
||||
#define WK_BUF %r15
|
||||
|
@ -205,14 +204,14 @@
|
|||
* blended AVX2 and ALU instruction scheduling
|
||||
* 1 vector iteration per 8 rounds
|
||||
*/
|
||||
vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
|
||||
vmovdqu (i * 2)(BUFFER_PTR), W_TMP
|
||||
.elseif ((i & 7) == 1)
|
||||
vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
|
||||
vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
|
||||
WY_TMP, WY_TMP
|
||||
.elseif ((i & 7) == 2)
|
||||
vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
|
||||
.elseif ((i & 7) == 4)
|
||||
vpaddd K_XMM(K_BASE), WY, WY_TMP
|
||||
vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
|
||||
.elseif ((i & 7) == 7)
|
||||
vmovdqu WY_TMP, PRECALC_WK(i&~7)
|
||||
|
||||
|
@ -255,7 +254,7 @@
|
|||
vpxor WY, WY_TMP, WY_TMP
|
||||
.elseif ((i & 7) == 7)
|
||||
vpxor WY_TMP2, WY_TMP, WY
|
||||
vpaddd K_XMM(K_BASE), WY, WY_TMP
|
||||
vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
|
||||
vmovdqu WY_TMP, PRECALC_WK(i&~7)
|
||||
|
||||
PRECALC_ROTATE_WY
|
||||
|
@ -291,7 +290,7 @@
|
|||
vpsrld $30, WY, WY
|
||||
vpor WY, WY_TMP, WY
|
||||
.elseif ((i & 7) == 7)
|
||||
vpaddd K_XMM(K_BASE), WY, WY_TMP
|
||||
vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
|
||||
vmovdqu WY_TMP, PRECALC_WK(i&~7)
|
||||
|
||||
PRECALC_ROTATE_WY
|
||||
|
@ -446,6 +445,16 @@
|
|||
|
||||
.endm
|
||||
|
||||
/* Add constant only if (%2 > %3) condition met (uses RTA as temp)
|
||||
* %1 + %2 >= %3 ? %4 : 0
|
||||
*/
|
||||
.macro ADD_IF_GE a, b, c, d
|
||||
mov \a, RTA
|
||||
add $\d, RTA
|
||||
cmp $\c, \b
|
||||
cmovge RTA, \a
|
||||
.endm
|
||||
|
||||
/*
|
||||
* macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
|
||||
*/
|
||||
|
@ -463,13 +472,16 @@
|
|||
lea (2*4*80+32)(%rsp), WK_BUF
|
||||
|
||||
# Precalc WK for first 2 blocks
|
||||
PRECALC_OFFSET = 0
|
||||
ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
|
||||
.set i, 0
|
||||
.rept 160
|
||||
PRECALC i
|
||||
.set i, i + 1
|
||||
.endr
|
||||
PRECALC_OFFSET = 128
|
||||
|
||||
/* Go to next block if needed */
|
||||
ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
|
||||
ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
|
||||
xchg WK_BUF, PRECALC_BUF
|
||||
|
||||
.align 32
|
||||
|
@ -479,8 +491,8 @@ _loop:
|
|||
* we use K_BASE value as a signal of a last block,
|
||||
* it is set below by: cmovae BUFFER_PTR, K_BASE
|
||||
*/
|
||||
cmp K_BASE, BUFFER_PTR
|
||||
jne _begin
|
||||
test BLOCKS_CTR, BLOCKS_CTR
|
||||
jnz _begin
|
||||
.align 32
|
||||
jmp _end
|
||||
.align 32
|
||||
|
@@ -512,10 +524,10 @@ _loop0:
 		.set j, j+2
 	.endr

-	add	$(2*64), BUFFER_PTR	/* move to next odd-64-byte block */
-	cmp	BUFFER_END, BUFFER_PTR	/* is current block the last one? */
-	cmovae	K_BASE, BUFFER_PTR	/* signal the last iteration smartly */
-
+	/* Update Counter */
+	sub $1, BLOCKS_CTR
+	/* Move to the next block only if needed*/
+	ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
 	/*
 	 * rounds
 	 * 60,62,64,66,68
@@ -532,8 +544,8 @@ _loop0:
 	UPDATE_HASH	12(HASH_PTR), D
 	UPDATE_HASH	16(HASH_PTR), E

-	cmp	K_BASE, BUFFER_PTR	/* is current block the last one? */
-	je	_loop
+	test	BLOCKS_CTR, BLOCKS_CTR
+	jz	_loop

 	mov	TB, B
@@ -575,10 +587,10 @@ _loop2:
 		.set j, j+2
 	.endr

 	add	$(2*64), BUFFER_PTR2	/* move to next even-64-byte block */

-	cmp	BUFFER_END, BUFFER_PTR2	/* is current block the last one */
-	cmovae	K_BASE, BUFFER_PTR	/* signal the last iteration smartly */
+	/* update counter */
+	sub	$1, BLOCKS_CTR
+	/* Move to the next block only if needed*/
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128

 	jmp	_loop3
 _loop3:
@@ -641,19 +653,12 @@ _loop3:

 	avx2_zeroupper

-	lea	K_XMM_AR(%rip), K_BASE
-
 	/* Setup initial values */
 	mov	CTX, HASH_PTR
 	mov	BUF, BUFFER_PTR
-	lea	64(BUF), BUFFER_PTR2
-
-	shl	$6, CNT			/* mul by 64 */
-	add	BUF, CNT
-	add	$64, CNT
-	mov	CNT, BUFFER_END
-
-	cmp	BUFFER_END, BUFFER_PTR2
-	cmovae	K_BASE, BUFFER_PTR2
+	mov	BUF, BUFFER_PTR2
+	mov	CNT, BLOCKS_CTR

 	xmm_mov	BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP

@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,

 static bool avx2_usable(void)
 {
-	if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+	if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
 		&& boot_cpu_has(X86_FEATURE_BMI1)
 		&& boot_cpu_has(X86_FEATURE_BMI2))
 		return true;

@@ -1211,6 +1211,8 @@ ENTRY(nmi)
 	 * other IST entries.
 	 */
+
+	ASM_CLAC

 	/* Use %rdx as our temp variable throughout */
 	pushq	%rdx

@@ -2114,7 +2114,7 @@ static void refresh_pce(void *ignored)
 	load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
 }

-static void x86_pmu_event_mapped(struct perf_event *event)
+static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 {
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;
@@ -2129,22 +2129,20 @@ static void x86_pmu_event_mapped(struct perf_event *event)
 	 * For now, this can't happen because all callers hold mmap_sem
 	 * for write. If this changes, we'll need a different solution.
 	 */
-	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+	lockdep_assert_held_exclusive(&mm->mmap_sem);

-	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
-		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
+		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
 }

-static void x86_pmu_event_unmapped(struct perf_event *event)
+static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
 {
-	if (!current->mm)
-		return;
-
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;

-	if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
-		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+	if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
+		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
 }

 static int x86_pmu_event_idx(struct perf_event *event)
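Both RDPMC callbacks now take the mm explicitly because the unmap side can run from a context whose current->mm is not the mm that owns the mapping. A sketch of how the perf core side is assumed to invoke the hooks (not part of this excerpt, shown only for orientation):

static void rdpmc_notify_mapped(struct perf_event *event, struct vm_area_struct *vma)
{
	/* pass the mm that owns the ring-buffer mapping, not current->mm */
	if (event->pmu->event_mapped)
		event->pmu->event_mapped(event, vma->vm_mm);
}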
@@ -69,7 +69,7 @@ struct bts_buffer {
 	struct bts_phys	buf[0];
 };

-struct pmu bts_pmu;
+static struct pmu bts_pmu;

 static size_t buf_size(struct page *page)
 {
@ -587,7 +587,7 @@ static __initconst const u64 p4_hw_cache_event_ids
|
|||
* P4_CONFIG_ALIASABLE or bits for P4_PEBS_METRIC, they are
|
||||
* either up to date automatically or not applicable at all.
|
||||
*/
|
||||
struct p4_event_alias {
|
||||
static struct p4_event_alias {
|
||||
u64 original;
|
||||
u64 alternative;
|
||||
} p4_event_aliases[] = {
|
||||
|
|
|
@ -559,7 +559,7 @@ static struct attribute_group rapl_pmu_format_group = {
|
|||
.attrs = rapl_formats_attr,
|
||||
};
|
||||
|
||||
const struct attribute_group *rapl_attr_groups[] = {
|
||||
static const struct attribute_group *rapl_attr_groups[] = {
|
||||
&rapl_pmu_attr_group,
|
||||
&rapl_pmu_format_group,
|
||||
&rapl_pmu_events_group,
|
||||
|
|
|
@ -721,7 +721,7 @@ static struct attribute *uncore_pmu_attrs[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group uncore_pmu_attr_group = {
|
||||
static const struct attribute_group uncore_pmu_attr_group = {
|
||||
.attrs = uncore_pmu_attrs,
|
||||
};
|
||||
|
||||
|
|
|
@ -272,7 +272,7 @@ static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group nhmex_uncore_ubox_format_group = {
|
||||
static const struct attribute_group nhmex_uncore_ubox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = nhmex_uncore_ubox_formats_attr,
|
||||
};
|
||||
|
@ -299,7 +299,7 @@ static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group nhmex_uncore_cbox_format_group = {
|
||||
static const struct attribute_group nhmex_uncore_cbox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = nhmex_uncore_cbox_formats_attr,
|
||||
};
|
||||
|
@ -407,7 +407,7 @@ static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group nhmex_uncore_bbox_format_group = {
|
||||
static const struct attribute_group nhmex_uncore_bbox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = nhmex_uncore_bbox_formats_attr,
|
||||
};
|
||||
|
@ -484,7 +484,7 @@ static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group nhmex_uncore_sbox_format_group = {
|
||||
static const struct attribute_group nhmex_uncore_sbox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = nhmex_uncore_sbox_formats_attr,
|
||||
};
|
||||
|
@ -898,7 +898,7 @@ static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group nhmex_uncore_mbox_format_group = {
|
||||
static const struct attribute_group nhmex_uncore_mbox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = nhmex_uncore_mbox_formats_attr,
|
||||
};
|
||||
|
@ -1163,7 +1163,7 @@ static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group nhmex_uncore_rbox_format_group = {
|
||||
static const struct attribute_group nhmex_uncore_rbox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = nhmex_uncore_rbox_formats_attr,
|
||||
};
|
||||
|
|
|
@ -130,7 +130,7 @@ static struct attribute *snb_uncore_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group snb_uncore_format_group = {
|
||||
static const struct attribute_group snb_uncore_format_group = {
|
||||
.name = "format",
|
||||
.attrs = snb_uncore_formats_attr,
|
||||
};
|
||||
|
@ -289,7 +289,7 @@ static struct attribute *snb_uncore_imc_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group snb_uncore_imc_format_group = {
|
||||
static const struct attribute_group snb_uncore_imc_format_group = {
|
||||
.name = "format",
|
||||
.attrs = snb_uncore_imc_formats_attr,
|
||||
};
|
||||
|
@ -769,7 +769,7 @@ static struct attribute *nhm_uncore_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group nhm_uncore_format_group = {
|
||||
static const struct attribute_group nhm_uncore_format_group = {
|
||||
.name = "format",
|
||||
.attrs = nhm_uncore_formats_attr,
|
||||
};
|
||||
|
|
|
@ -602,27 +602,27 @@ static struct uncore_event_desc snbep_uncore_qpi_events[] = {
|
|||
{ /* end: all zeroes */ },
|
||||
};
|
||||
|
||||
static struct attribute_group snbep_uncore_format_group = {
|
||||
static const struct attribute_group snbep_uncore_format_group = {
|
||||
.name = "format",
|
||||
.attrs = snbep_uncore_formats_attr,
|
||||
};
|
||||
|
||||
static struct attribute_group snbep_uncore_ubox_format_group = {
|
||||
static const struct attribute_group snbep_uncore_ubox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = snbep_uncore_ubox_formats_attr,
|
||||
};
|
||||
|
||||
static struct attribute_group snbep_uncore_cbox_format_group = {
|
||||
static const struct attribute_group snbep_uncore_cbox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = snbep_uncore_cbox_formats_attr,
|
||||
};
|
||||
|
||||
static struct attribute_group snbep_uncore_pcu_format_group = {
|
||||
static const struct attribute_group snbep_uncore_pcu_format_group = {
|
||||
.name = "format",
|
||||
.attrs = snbep_uncore_pcu_formats_attr,
|
||||
};
|
||||
|
||||
static struct attribute_group snbep_uncore_qpi_format_group = {
|
||||
static const struct attribute_group snbep_uncore_qpi_format_group = {
|
||||
.name = "format",
|
||||
.attrs = snbep_uncore_qpi_formats_attr,
|
||||
};
|
||||
|
@ -1431,27 +1431,27 @@ static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group ivbep_uncore_format_group = {
|
||||
static const struct attribute_group ivbep_uncore_format_group = {
|
||||
.name = "format",
|
||||
.attrs = ivbep_uncore_formats_attr,
|
||||
};
|
||||
|
||||
static struct attribute_group ivbep_uncore_ubox_format_group = {
|
||||
static const struct attribute_group ivbep_uncore_ubox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = ivbep_uncore_ubox_formats_attr,
|
||||
};
|
||||
|
||||
static struct attribute_group ivbep_uncore_cbox_format_group = {
|
||||
static const struct attribute_group ivbep_uncore_cbox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = ivbep_uncore_cbox_formats_attr,
|
||||
};
|
||||
|
||||
static struct attribute_group ivbep_uncore_pcu_format_group = {
|
||||
static const struct attribute_group ivbep_uncore_pcu_format_group = {
|
||||
.name = "format",
|
||||
.attrs = ivbep_uncore_pcu_formats_attr,
|
||||
};
|
||||
|
||||
static struct attribute_group ivbep_uncore_qpi_format_group = {
|
||||
static const struct attribute_group ivbep_uncore_qpi_format_group = {
|
||||
.name = "format",
|
||||
.attrs = ivbep_uncore_qpi_formats_attr,
|
||||
};
|
||||
|
@ -1887,7 +1887,7 @@ static struct attribute *knl_uncore_ubox_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group knl_uncore_ubox_format_group = {
|
||||
static const struct attribute_group knl_uncore_ubox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = knl_uncore_ubox_formats_attr,
|
||||
};
|
||||
|
@ -1927,7 +1927,7 @@ static struct attribute *knl_uncore_cha_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group knl_uncore_cha_format_group = {
|
||||
static const struct attribute_group knl_uncore_cha_format_group = {
|
||||
.name = "format",
|
||||
.attrs = knl_uncore_cha_formats_attr,
|
||||
};
|
||||
|
@ -2037,7 +2037,7 @@ static struct attribute *knl_uncore_pcu_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group knl_uncore_pcu_format_group = {
|
||||
static const struct attribute_group knl_uncore_pcu_format_group = {
|
||||
.name = "format",
|
||||
.attrs = knl_uncore_pcu_formats_attr,
|
||||
};
|
||||
|
@ -2187,7 +2187,7 @@ static struct attribute *knl_uncore_irp_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group knl_uncore_irp_format_group = {
|
||||
static const struct attribute_group knl_uncore_irp_format_group = {
|
||||
.name = "format",
|
||||
.attrs = knl_uncore_irp_formats_attr,
|
||||
};
|
||||
|
@ -2385,7 +2385,7 @@ static struct attribute *hswep_uncore_ubox_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group hswep_uncore_ubox_format_group = {
|
||||
static const struct attribute_group hswep_uncore_ubox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = hswep_uncore_ubox_formats_attr,
|
||||
};
|
||||
|
@ -2439,7 +2439,7 @@ static struct attribute *hswep_uncore_cbox_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group hswep_uncore_cbox_format_group = {
|
||||
static const struct attribute_group hswep_uncore_cbox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = hswep_uncore_cbox_formats_attr,
|
||||
};
|
||||
|
@ -2621,7 +2621,7 @@ static struct attribute *hswep_uncore_sbox_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group hswep_uncore_sbox_format_group = {
|
||||
static const struct attribute_group hswep_uncore_sbox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = hswep_uncore_sbox_formats_attr,
|
||||
};
|
||||
|
@ -3314,7 +3314,7 @@ static struct attribute *skx_uncore_cha_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group skx_uncore_chabox_format_group = {
|
||||
static const struct attribute_group skx_uncore_chabox_format_group = {
|
||||
.name = "format",
|
||||
.attrs = skx_uncore_cha_formats_attr,
|
||||
};
|
||||
|
@ -3427,7 +3427,7 @@ static struct attribute *skx_uncore_iio_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group skx_uncore_iio_format_group = {
|
||||
static const struct attribute_group skx_uncore_iio_format_group = {
|
||||
.name = "format",
|
||||
.attrs = skx_uncore_iio_formats_attr,
|
||||
};
|
||||
|
@ -3484,7 +3484,7 @@ static struct attribute *skx_uncore_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group skx_uncore_format_group = {
|
||||
static const struct attribute_group skx_uncore_format_group = {
|
||||
.name = "format",
|
||||
.attrs = skx_uncore_formats_attr,
|
||||
};
|
||||
|
@ -3605,7 +3605,7 @@ static struct attribute *skx_upi_uncore_formats_attr[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group skx_upi_uncore_format_group = {
|
||||
static const struct attribute_group skx_upi_uncore_format_group = {
|
||||
.name = "format",
|
||||
.attrs = skx_upi_uncore_formats_attr,
|
||||
};
|
||||
|
|
|
@@ -286,7 +286,7 @@
 #define X86_FEATURE_PAUSEFILTER	(15*32+10) /* filtered pause intercept */
 #define X86_FEATURE_PFTHRESHOLD	(15*32+12) /* pause filter threshold */
 #define X86_FEATURE_AVIC		(15*32+13) /* Virtual Interrupt Controller */
-#define X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE (15*32+15) /* Virtual VMLOAD VMSAVE */
+#define X86_FEATURE_V_VMSAVE_VMLOAD	(15*32+15) /* Virtual VMSAVE VMLOAD */
 #define X86_FEATURE_VGIF		(15*32+16) /* Virtual GIF */

 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
@@ -247,11 +247,11 @@ extern int force_personality32;

 /*
  * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
  * space open for things that want to use the area for 32-bit pointers.
  */
 #define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
-						  0x100000000UL)
+						  (TASK_SIZE / 3 * 2))

 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. This could be done in user space,
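A quick sanity check on the new constant, assuming the usual 47-bit user address space with 4-level paging:

/*
 * TASK_SIZE         ~ 0x00007ffffffff000  (about 128 TiB)
 * TASK_SIZE / 3 * 2 ~ 0x0000555555554000  (about 85 TiB)
 *
 * so the PIE load base moves from the fixed 4 GiB (0x100000000UL) up to
 * roughly 85 TiB, which still leaves the entire 32-bit range free for
 * things that want 32-bit pointers.
 */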
@@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 	return 0;
 }

-static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
+static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
 {
 	if (use_xsave()) {
-		copy_kernel_to_xregs(&fpstate->xsave, -1);
+		copy_kernel_to_xregs(&fpstate->xsave, mask);
 	} else {
 		if (use_fxsr())
 			copy_kernel_to_fxregs(&fpstate->fxsave);
@@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
 			: : [addr] "m" (fpstate));
 	}

-	__copy_kernel_to_fpregs(fpstate);
+	__copy_kernel_to_fpregs(fpstate, -1);
 }

 extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

@ -43,6 +43,9 @@ struct hypervisor_x86 {
|
|||
|
||||
/* pin current vcpu to specified physical cpu (run rarely) */
|
||||
void (*pin_vcpu)(int);
|
||||
|
||||
/* called during init_mem_mapping() to setup early mappings. */
|
||||
void (*init_mem_mapping)(void);
|
||||
};
|
||||
|
||||
extern const struct hypervisor_x86 *x86_hyper;
|
||||
|
@ -57,8 +60,15 @@ extern const struct hypervisor_x86 x86_hyper_kvm;
|
|||
extern void init_hypervisor_platform(void);
|
||||
extern bool hypervisor_x2apic_available(void);
|
||||
extern void hypervisor_pin_vcpu(int cpu);
|
||||
|
||||
static inline void hypervisor_init_mem_mapping(void)
|
||||
{
|
||||
if (x86_hyper && x86_hyper->init_mem_mapping)
|
||||
x86_hyper->init_mem_mapping();
|
||||
}
|
||||
#else
|
||||
static inline void init_hypervisor_platform(void) { }
|
||||
static inline bool hypervisor_x2apic_available(void) { return false; }
|
||||
static inline void hypervisor_init_mem_mapping(void) { }
|
||||
#endif /* CONFIG_HYPERVISOR_GUEST */
|
||||
#endif /* _ASM_X86_HYPERVISOR_H */
|
||||
|
|
|
@@ -492,6 +492,7 @@ struct kvm_vcpu_arch {
 	unsigned long cr4;
 	unsigned long cr4_guest_owned_bits;
 	unsigned long cr8;
+	u32 pkru;
 	u32 hflags;
 	u64 efer;
 	u64 apic_base;
@@ -40,13 +40,16 @@ static void aperfmperf_snapshot_khz(void *dummy)
 	struct aperfmperf_sample *s = this_cpu_ptr(&samples);
 	ktime_t now = ktime_get();
 	s64 time_delta = ktime_ms_delta(now, s->time);
+	unsigned long flags;

 	/* Don't bother re-computing within the cache threshold time. */
 	if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
 		return;

+	local_irq_save(flags);
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
+	local_irq_restore(flags);

 	aperf_delta = aperf - s->aperf;
 	mperf_delta = mperf - s->mperf;

@@ -122,7 +122,7 @@ static struct attribute *thermal_throttle_attrs[] = {
 	NULL
 };

-static struct attribute_group thermal_attr_group = {
+static const struct attribute_group thermal_attr_group = {
 	.attrs	= thermal_throttle_attrs,
 	.name	= "thermal_throttle"
 };

@ -561,7 +561,7 @@ static struct attribute *mc_default_attrs[] = {
|
|||
NULL
|
||||
};
|
||||
|
||||
static struct attribute_group mc_attr_group = {
|
||||
static const struct attribute_group mc_attr_group = {
|
||||
.attrs = mc_default_attrs,
|
||||
.name = "microcode",
|
||||
};
|
||||
|
@ -707,7 +707,7 @@ static struct attribute *cpu_root_microcode_attrs[] = {
|
|||
NULL
|
||||
};
|
||||
|
||||
static struct attribute_group cpu_root_microcode_group = {
|
||||
static const struct attribute_group cpu_root_microcode_group = {
|
||||
.name = "microcode",
|
||||
.attrs = cpu_root_microcode_attrs,
|
||||
};
|
||||
|
|
|
@ -237,6 +237,18 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
|
|||
stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
|
||||
}
|
||||
|
||||
static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base,
|
||||
unsigned long size, mtrr_type type)
|
||||
{
|
||||
struct set_mtrr_data data = { .smp_reg = reg,
|
||||
.smp_base = base,
|
||||
.smp_size = size,
|
||||
.smp_type = type
|
||||
};
|
||||
|
||||
stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask);
|
||||
}
|
||||
|
||||
static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
|
||||
unsigned long size, mtrr_type type)
|
||||
{
|
||||
|
@ -370,7 +382,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
|
|||
/* Search for an empty MTRR */
|
||||
i = mtrr_if->get_free_region(base, size, replace);
|
||||
if (i >= 0) {
|
||||
set_mtrr(i, base, size, type);
|
||||
set_mtrr_cpuslocked(i, base, size, type);
|
||||
if (likely(replace < 0)) {
|
||||
mtrr_usage_table[i] = 1;
|
||||
} else {
|
||||
|
@ -378,7 +390,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
|
|||
if (increment)
|
||||
mtrr_usage_table[i]++;
|
||||
if (unlikely(replace != i)) {
|
||||
set_mtrr(replace, 0, 0, 0);
|
||||
set_mtrr_cpuslocked(replace, 0, 0, 0);
|
||||
mtrr_usage_table[replace] = 0;
|
||||
}
|
||||
}
|
||||
|
@ -506,7 +518,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
|
|||
goto out;
|
||||
}
|
||||
if (--mtrr_usage_table[reg] < 1)
|
||||
set_mtrr(reg, 0, 0, 0);
|
||||
set_mtrr_cpuslocked(reg, 0, 0, 0);
|
||||
error = reg;
|
||||
out:
|
||||
mutex_unlock(&mtrr_mutex);
|
||||
|
|
|
@ -53,6 +53,7 @@ void __head __startup_64(unsigned long physaddr)
|
|||
pudval_t *pud;
|
||||
pmdval_t *pmd, pmd_entry;
|
||||
int i;
|
||||
unsigned int *next_pgt_ptr;
|
||||
|
||||
/* Is the address too large? */
|
||||
if (physaddr >> MAX_PHYSMEM_BITS)
|
||||
|
@ -91,9 +92,9 @@ void __head __startup_64(unsigned long physaddr)
|
|||
* creates a bunch of nonsense entries but that is fine --
|
||||
* it avoids problems around wraparound.
|
||||
*/
|
||||
|
||||
pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
|
||||
pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
|
||||
next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
|
||||
pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
|
||||
pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
|
||||
|
||||
if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
|
||||
p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
|
||||
|
|
|
@ -55,7 +55,7 @@ static struct bin_attribute *boot_params_data_attrs[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group boot_params_attr_group = {
|
||||
static const struct attribute_group boot_params_attr_group = {
|
||||
.attrs = boot_params_version_attrs,
|
||||
.bin_attrs = boot_params_data_attrs,
|
||||
};
|
||||
|
@ -202,7 +202,7 @@ static struct bin_attribute *setup_data_data_attrs[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group setup_data_attr_group = {
|
||||
static const struct attribute_group setup_data_attr_group = {
|
||||
.attrs = setup_data_type_attrs,
|
||||
.bin_attrs = setup_data_data_attrs,
|
||||
};
|
||||
|
|
|
@ -971,7 +971,8 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle)
|
|||
* Returns zero if CPU booted OK, else error code from
|
||||
* ->wakeup_secondary_cpu.
|
||||
*/
|
||||
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
|
||||
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
|
||||
int *cpu0_nmi_registered)
|
||||
{
|
||||
volatile u32 *trampoline_status =
|
||||
(volatile u32 *) __va(real_mode_header->trampoline_status);
|
||||
|
@ -979,7 +980,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
|
|||
unsigned long start_ip = real_mode_header->trampoline_start;
|
||||
|
||||
unsigned long boot_error = 0;
|
||||
int cpu0_nmi_registered = 0;
|
||||
unsigned long timeout;
|
||||
|
||||
idle->thread.sp = (unsigned long)task_pt_regs(idle);
|
||||
|
@ -1035,7 +1035,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
|
|||
boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
|
||||
else
|
||||
boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
|
||||
&cpu0_nmi_registered);
|
||||
cpu0_nmi_registered);
|
||||
|
||||
if (!boot_error) {
|
||||
/*
|
||||
|
@ -1080,12 +1080,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
|
|||
*/
|
||||
smpboot_restore_warm_reset_vector();
|
||||
}
|
||||
/*
|
||||
* Clean up the nmi handler. Do this after the callin and callout sync
|
||||
* to avoid impact of possible long unregister time.
|
||||
*/
|
||||
if (cpu0_nmi_registered)
|
||||
unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");
|
||||
|
||||
return boot_error;
|
||||
}
|
||||
|
@ -1093,8 +1087,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
|
|||
int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
|
||||
{
|
||||
int apicid = apic->cpu_present_to_apicid(cpu);
|
||||
int cpu0_nmi_registered = 0;
|
||||
unsigned long flags;
|
||||
int err;
|
||||
int err, ret = 0;
|
||||
|
||||
WARN_ON(irqs_disabled());
|
||||
|
||||
|
@ -1131,10 +1126,11 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
|
|||
|
||||
common_cpu_up(cpu, tidle);
|
||||
|
||||
err = do_boot_cpu(apicid, cpu, tidle);
|
||||
err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
|
||||
if (err) {
|
||||
pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
|
||||
return -EIO;
|
||||
ret = -EIO;
|
||||
goto unreg_nmi;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1150,7 +1146,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
|
|||
touch_nmi_watchdog();
|
||||
}
|
||||
|
||||
return 0;
|
||||
unreg_nmi:
|
||||
/*
|
||||
* Clean up the nmi handler. Do this after the callin and callout sync
|
||||
* to avoid impact of possible long unregister time.
|
||||
*/
|
||||
if (cpu0_nmi_registered)
|
||||
unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@@ -474,7 +474,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
 		cpuid_mask(&entry->ecx, CPUID_7_ECX);
 		/* PKU is not yet implemented for shadow paging. */
-		if (!tdp_enabled)
+		if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
 			entry->ecx &= ~F(PKU);
 		entry->edx &= kvm_cpuid_7_0_edx_x86_features;
 		entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);

@@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
 		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
 }

-static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
-{
-	return kvm_x86_ops->get_pkru(vcpu);
-}
-
 static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hflags |= HF_GUEST_MASK;

@@ -172,7 +172,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 		* index of the protection domain, so pte_pkey * 2 is
 		* is the index of the first bit for the domain.
 		*/
-		pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
+		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

 		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
 		offset = (pfec & ~1) +

@ -1117,7 +1117,7 @@ static __init int svm_hardware_setup(void)
|
|||
|
||||
if (vls) {
|
||||
if (!npt_enabled ||
|
||||
!boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) ||
|
||||
!boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
|
||||
!IS_ENABLED(CONFIG_X86_64)) {
|
||||
vls = false;
|
||||
} else {
|
||||
|
@ -1790,11 +1790,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
|
|||
to_svm(vcpu)->vmcb->save.rflags = rflags;
|
||||
}
|
||||
|
||||
static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
|
||||
{
|
||||
switch (reg) {
|
||||
|
@ -5436,8 +5431,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
|
|||
.get_rflags = svm_get_rflags,
|
||||
.set_rflags = svm_set_rflags,
|
||||
|
||||
.get_pkru = svm_get_pkru,
|
||||
|
||||
.tlb_flush = svm_flush_tlb,
|
||||
|
||||
.run = svm_vcpu_run,
|
||||
|
|
|
@ -641,8 +641,6 @@ struct vcpu_vmx {
|
|||
|
||||
u64 current_tsc_ratio;
|
||||
|
||||
bool guest_pkru_valid;
|
||||
u32 guest_pkru;
|
||||
u32 host_pkru;
|
||||
|
||||
/*
|
||||
|
@ -2398,11 +2396,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
|
|||
to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
|
||||
}
|
||||
|
||||
static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return to_vmx(vcpu)->guest_pkru;
|
||||
}
|
||||
|
||||
static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
|
||||
|
@ -9291,8 +9284,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
|||
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
|
||||
vmx_set_interrupt_shadow(vcpu, 0);
|
||||
|
||||
if (vmx->guest_pkru_valid)
|
||||
__write_pkru(vmx->guest_pkru);
|
||||
if (static_cpu_has(X86_FEATURE_PKU) &&
|
||||
kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
|
||||
vcpu->arch.pkru != vmx->host_pkru)
|
||||
__write_pkru(vcpu->arch.pkru);
|
||||
|
||||
atomic_switch_perf_msrs(vmx);
|
||||
debugctlmsr = get_debugctlmsr();
|
||||
|
@ -9440,13 +9435,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
|||
* back on host, so it is safe to read guest PKRU from current
|
||||
* XSAVE.
|
||||
*/
|
||||
if (boot_cpu_has(X86_FEATURE_OSPKE)) {
|
||||
vmx->guest_pkru = __read_pkru();
|
||||
if (vmx->guest_pkru != vmx->host_pkru) {
|
||||
vmx->guest_pkru_valid = true;
|
||||
if (static_cpu_has(X86_FEATURE_PKU) &&
|
||||
kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
|
||||
vcpu->arch.pkru = __read_pkru();
|
||||
if (vcpu->arch.pkru != vmx->host_pkru)
|
||||
__write_pkru(vmx->host_pkru);
|
||||
} else
|
||||
vmx->guest_pkru_valid = false;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -11964,8 +11957,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
|
|||
.get_rflags = vmx_get_rflags,
|
||||
.set_rflags = vmx_set_rflags,
|
||||
|
||||
.get_pkru = vmx_get_pkru,
|
||||
|
||||
.tlb_flush = vmx_flush_tlb,
|
||||
|
||||
.run = vmx_vcpu_run,
|
||||
|
|
|
@ -3269,7 +3269,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
|
|||
u32 size, offset, ecx, edx;
|
||||
cpuid_count(XSTATE_CPUID, index,
|
||||
&size, &offset, &ecx, &edx);
|
||||
memcpy(dest + offset, src, size);
|
||||
if (feature == XFEATURE_MASK_PKRU)
|
||||
memcpy(dest + offset, &vcpu->arch.pkru,
|
||||
sizeof(vcpu->arch.pkru));
|
||||
else
|
||||
memcpy(dest + offset, src, size);
|
||||
|
||||
}
|
||||
|
||||
valid -= feature;
|
||||
|
@ -3307,7 +3312,11 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
|
|||
u32 size, offset, ecx, edx;
|
||||
cpuid_count(XSTATE_CPUID, index,
|
||||
&size, &offset, &ecx, &edx);
|
||||
memcpy(dest, src + offset, size);
|
||||
if (feature == XFEATURE_MASK_PKRU)
|
||||
memcpy(&vcpu->arch.pkru, src + offset,
|
||||
sizeof(vcpu->arch.pkru));
|
||||
else
|
||||
memcpy(dest, src + offset, size);
|
||||
}
|
||||
|
||||
valid -= feature;
|
||||
|
@ -7667,7 +7676,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
|
|||
*/
|
||||
vcpu->guest_fpu_loaded = 1;
|
||||
__kernel_fpu_begin();
|
||||
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
|
||||
/* PKRU is separately restored in kvm_x86_ops->run. */
|
||||
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
|
||||
~XFEATURE_MASK_PKRU);
|
||||
trace_kvm_fpu(1);
|
||||
}
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <asm/dma.h> /* for MAX_DMA_PFN */
|
||||
#include <asm/microcode.h>
|
||||
#include <asm/kaslr.h>
|
||||
#include <asm/hypervisor.h>
|
||||
|
||||
/*
|
||||
* We need to define the tracepoints somewhere, and tlb.c
|
||||
|
@ -636,6 +637,8 @@ void __init init_mem_mapping(void)
|
|||
load_cr3(swapper_pg_dir);
|
||||
__flush_tlb_all();
|
||||
|
||||
hypervisor_init_mem_mapping();
|
||||
|
||||
early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
|
||||
}
|
||||
|
||||
|
|
|
@ -50,8 +50,7 @@ unsigned long tasksize_64bit(void)
|
|||
static unsigned long stack_maxrandom_size(unsigned long task_size)
|
||||
{
|
||||
unsigned long max = 0;
|
||||
if ((current->flags & PF_RANDOMIZE) &&
|
||||
!(current->personality & ADDR_NO_RANDOMIZE)) {
|
||||
if (current->flags & PF_RANDOMIZE) {
|
||||
max = (-1UL) & __STACK_RND_MASK(task_size == tasksize_32bit());
|
||||
max <<= PAGE_SHIFT;
|
||||
}
|
||||
|
@ -79,13 +78,13 @@ static int mmap_is_legacy(void)
|
|||
|
||||
static unsigned long arch_rnd(unsigned int rndbits)
|
||||
{
|
||||
if (!(current->flags & PF_RANDOMIZE))
|
||||
return 0;
|
||||
return (get_random_long() & ((1UL << rndbits) - 1)) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
unsigned long arch_mmap_rnd(void)
|
||||
{
|
||||
if (!(current->flags & PF_RANDOMIZE))
|
||||
return 0;
|
||||
return arch_rnd(mmap_is_ia32() ? mmap32_rnd_bits : mmap64_rnd_bits);
|
||||
}
|
||||
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
static struct bau_operations ops __ro_after_init;
|
||||
|
||||
/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
|
||||
static int timeout_base_ns[] = {
|
||||
static const int timeout_base_ns[] = {
|
||||
20,
|
||||
160,
|
||||
1280,
|
||||
|
@ -1216,7 +1216,7 @@ static struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
|
|||
* set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
|
||||
* Such a message must be ignored.
|
||||
*/
|
||||
void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
|
||||
static void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
|
||||
{
|
||||
unsigned long mmr_image;
|
||||
unsigned char swack_vec;
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
#include <asm/setup.h>
|
||||
#include <asm/hypervisor.h>
|
||||
#include <asm/e820/api.h>
|
||||
#include <asm/early_ioremap.h>
|
||||
|
||||
#include <asm/xen/cpuid.h>
|
||||
#include <asm/xen/hypervisor.h>
|
||||
|
@ -21,38 +22,50 @@
|
|||
#include "mmu.h"
|
||||
#include "smp.h"
|
||||
|
||||
void __ref xen_hvm_init_shared_info(void)
|
||||
static unsigned long shared_info_pfn;
|
||||
|
||||
void xen_hvm_init_shared_info(void)
|
||||
{
|
||||
struct xen_add_to_physmap xatp;
|
||||
u64 pa;
|
||||
|
||||
if (HYPERVISOR_shared_info == &xen_dummy_shared_info) {
|
||||
/*
|
||||
* Search for a free page starting at 4kB physical address.
|
||||
* Low memory is preferred to avoid an EPT large page split up
|
||||
* by the mapping.
|
||||
* Starting below X86_RESERVE_LOW (usually 64kB) is fine as
|
||||
* the BIOS used for HVM guests is well behaved and won't
|
||||
* clobber memory other than the first 4kB.
|
||||
*/
|
||||
for (pa = PAGE_SIZE;
|
||||
!e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
|
||||
memblock_is_reserved(pa);
|
||||
pa += PAGE_SIZE)
|
||||
;
|
||||
|
||||
memblock_reserve(pa, PAGE_SIZE);
|
||||
HYPERVISOR_shared_info = __va(pa);
|
||||
}
|
||||
|
||||
xatp.domid = DOMID_SELF;
|
||||
xatp.idx = 0;
|
||||
xatp.space = XENMAPSPACE_shared_info;
|
||||
xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
|
||||
xatp.gpfn = shared_info_pfn;
|
||||
if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
|
||||
BUG();
|
||||
}
|
||||
|
||||
static void __init reserve_shared_info(void)
|
||||
{
|
||||
u64 pa;
|
||||
|
||||
/*
|
||||
* Search for a free page starting at 4kB physical address.
|
||||
* Low memory is preferred to avoid an EPT large page split up
|
||||
* by the mapping.
|
||||
* Starting below X86_RESERVE_LOW (usually 64kB) is fine as
|
||||
* the BIOS used for HVM guests is well behaved and won't
|
||||
* clobber memory other than the first 4kB.
|
||||
*/
|
||||
for (pa = PAGE_SIZE;
|
||||
!e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
|
||||
memblock_is_reserved(pa);
|
||||
pa += PAGE_SIZE)
|
||||
;
|
||||
|
||||
shared_info_pfn = PHYS_PFN(pa);
|
||||
|
||||
memblock_reserve(pa, PAGE_SIZE);
|
||||
HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
|
||||
}
|
||||
|
||||
static void __init xen_hvm_init_mem_mapping(void)
|
||||
{
|
||||
early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
|
||||
HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
|
||||
}
|
||||
|
||||
static void __init init_hvm_pv_info(void)
|
||||
{
|
||||
int major, minor;
|
||||
|
@ -153,6 +166,7 @@ static void __init xen_hvm_guest_init(void)
|
|||
|
||||
init_hvm_pv_info();
|
||||
|
||||
reserve_shared_info();
|
||||
xen_hvm_init_shared_info();
|
||||
|
||||
/*
|
||||
|
@ -218,5 +232,6 @@ const struct hypervisor_x86 x86_hyper_xen_hvm = {
|
|||
.init_platform = xen_hvm_guest_init,
|
||||
.pin_vcpu = xen_pin_vcpu,
|
||||
.x2apic_available = xen_x2apic_para_available,
|
||||
.init_mem_mapping = xen_hvm_init_mem_mapping,
|
||||
};
|
||||
EXPORT_SYMBOL(x86_hyper_xen_hvm);
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
generic-y += bug.h
|
||||
generic-y += clkdev.h
|
||||
generic-y += device.h
|
||||
generic-y += div64.h
|
||||
generic-y += dma-contiguous.h
|
||||
generic-y += emergency-restart.h
|
||||
|
@ -17,6 +18,7 @@ generic-y += local.h
|
|||
generic-y += local64.h
|
||||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += param.h
|
||||
generic-y += percpu.h
|
||||
generic-y += preempt.h
|
||||
generic-y += rwsem.h
|
||||
|
|
|
@ -1,15 +0,0 @@
|
|||
/*
|
||||
* Arch specific extensions to struct device
|
||||
*
|
||||
* This file is released under the GPLv2
|
||||
*/
|
||||
#ifndef _ASM_XTENSA_DEVICE_H
|
||||
#define _ASM_XTENSA_DEVICE_H
|
||||
|
||||
struct dev_archdata {
|
||||
};
|
||||
|
||||
struct pdev_archdata {
|
||||
};
|
||||
|
||||
#endif /* _ASM_XTENSA_DEVICE_H */
|
|
@ -1,18 +0,0 @@
|
|||
/*
|
||||
* include/asm-xtensa/param.h
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2001 - 2005 Tensilica Inc.
|
||||
*/
|
||||
#ifndef _XTENSA_PARAM_H
|
||||
#define _XTENSA_PARAM_H
|
||||
|
||||
#include <uapi/asm/param.h>
|
||||
|
||||
# define HZ CONFIG_HZ /* internal timer frequency */
|
||||
# define USER_HZ 100 /* for user interfaces in "ticks" */
|
||||
# define CLOCKS_PER_SEC (USER_HZ) /* frequnzy at which times() counts */
|
||||
#endif /* _XTENSA_PARAM_H */
|
|
@ -94,13 +94,11 @@ unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
|
|||
}
|
||||
EXPORT_SYMBOL(__sync_fetch_and_or_4);
|
||||
|
||||
#ifdef CONFIG_NET
|
||||
/*
|
||||
* Networking support
|
||||
*/
|
||||
EXPORT_SYMBOL(csum_partial);
|
||||
EXPORT_SYMBOL(csum_partial_copy_generic);
|
||||
#endif /* CONFIG_NET */
|
||||
|
||||
/*
|
||||
* Architecture-specific symbols
|
||||
|
|
|
@ -103,6 +103,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
|
|||
clear_page_alias(kvaddr, paddr);
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(clear_user_highpage);
|
||||
|
||||
void copy_user_highpage(struct page *dst, struct page *src,
|
||||
unsigned long vaddr, struct vm_area_struct *vma)
|
||||
|
@ -119,10 +120,7 @@ void copy_user_highpage(struct page *dst, struct page *src,
|
|||
copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
|
||||
|
||||
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
|
||||
EXPORT_SYMBOL(copy_user_highpage);
|
||||
|
||||
/*
|
||||
* Any time the kernel writes to a user page cache page, or it is about to
|
||||
|
@ -176,7 +174,7 @@ void flush_dcache_page(struct page *page)
|
|||
|
||||
/* There shouldn't be an entry in the cache for this page anymore. */
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(flush_dcache_page);
|
||||
|
||||
/*
|
||||
* For now, flush the whole cache. FIXME??
|
||||
|
@ -188,6 +186,7 @@ void local_flush_cache_range(struct vm_area_struct *vma,
|
|||
__flush_invalidate_dcache_all();
|
||||
__invalidate_icache_all();
|
||||
}
|
||||
EXPORT_SYMBOL(local_flush_cache_range);
|
||||
|
||||
/*
|
||||
* Remove any entry in the cache for this page.
|
||||
|
@ -207,8 +206,9 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
|
|||
__flush_invalidate_dcache_page_alias(virt, phys);
|
||||
__invalidate_icache_page_alias(virt, phys);
|
||||
}
|
||||
EXPORT_SYMBOL(local_flush_cache_page);
|
||||
|
||||
#endif
|
||||
#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
|
||||
|
||||
void
|
||||
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
|
||||
|
@ -225,7 +225,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
|
|||
|
||||
flush_tlb_page(vma, addr);
|
||||
|
||||
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
|
||||
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
|
||||
|
||||
if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
|
||||
unsigned long phys = page_to_phys(page);
|
||||
|
@ -256,7 +256,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
|
|||
* flush_dcache_page() on the page.
|
||||
*/
|
||||
|
||||
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
|
||||
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
|
||||
|
||||
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
|
||||
unsigned long vaddr, void *dst, const void *src,
|
||||
|
|
|
@@ -71,17 +71,29 @@ struct bfq_service_tree {
 *
 * bfq_sched_data is the basic scheduler queue.  It supports three
 * ioprio_classes, and can be used either as a toplevel queue or as an
-* intermediate queue on a hierarchical setup.  @next_in_service
-* points to the active entity of the sched_data service trees that
-* will be scheduled next. It is used to reduce the number of steps
-* needed for each hierarchical-schedule update.
+* intermediate queue in a hierarchical setup.
 *
 * The supported ioprio_classes are the same as in CFQ, in descending
 * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
 * Requests from higher priority queues are served before all the
 * requests from lower priority queues; among requests of the same
 * queue requests are served according to B-WF2Q+.
-* All the fields are protected by the queue lock of the containing bfqd.
+*
+* The schedule is implemented by the service trees, plus the field
+* @next_in_service, which points to the entity on the active trees
+* that will be served next, if 1) no changes in the schedule occurs
+* before the current in-service entity is expired, 2) the in-service
+* queue becomes idle when it expires, and 3) if the entity pointed by
+* in_service_entity is not a queue, then the in-service child entity
+* of the entity pointed by in_service_entity becomes idle on
+* expiration. This peculiar definition allows for the following
+* optimization, not yet exploited: while a given entity is still in
+* service, we already know which is the best candidate for next
+* service among the other active entitities in the same parent
+* entity. We can then quickly compare the timestamps of the
+* in-service entity with those of such best candidate.
+*
+* All fields are protected by the lock of the containing bfqd.
 */
 struct bfq_sched_data {
 	/* entity in service */

146  block/bfq-wf2q.c
@ -188,21 +188,23 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
|
|||
|
||||
/*
|
||||
* This function tells whether entity stops being a candidate for next
|
||||
* service, according to the following logic.
|
||||
* service, according to the restrictive definition of the field
|
||||
* next_in_service. In particular, this function is invoked for an
|
||||
* entity that is about to be set in service.
|
||||
*
|
||||
* This function is invoked for an entity that is about to be set in
|
||||
* service. If such an entity is a queue, then the entity is no longer
|
||||
* a candidate for next service (i.e, a candidate entity to serve
|
||||
* after the in-service entity is expired). The function then returns
|
||||
* true.
|
||||
* If entity is a queue, then the entity is no longer a candidate for
|
||||
* next service according to the that definition, because entity is
|
||||
* about to become the in-service queue. This function then returns
|
||||
* true if entity is a queue.
|
||||
*
|
||||
* In contrast, the entity could stil be a candidate for next service
|
||||
* if it is not a queue, and has more than one child. In fact, even if
|
||||
* one of its children is about to be set in service, other children
|
||||
* may still be the next to serve. As a consequence, a non-queue
|
||||
* entity is not a candidate for next-service only if it has only one
|
||||
* child. And only if this condition holds, then the function returns
|
||||
* true for a non-queue entity.
|
||||
* In contrast, entity could still be a candidate for next service if
|
||||
* it is not a queue, and has more than one active child. In fact,
|
||||
* even if one of its children is about to be set in service, other
|
||||
* active children may still be the next to serve, for the parent
|
||||
* entity, even according to the above definition. As a consequence, a
|
||||
* non-queue entity is not a candidate for next-service only if it has
|
||||
* only one active child. And only if this condition holds, then this
|
||||
* function returns true for a non-queue entity.
|
||||
*/
|
||||
static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
|
||||
{
|
||||
|
@ -213,6 +215,18 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
|
|||
|
||||
bfqg = container_of(entity, struct bfq_group, entity);
|
||||
|
||||
/*
|
||||
* The field active_entities does not always contain the
|
||||
* actual number of active children entities: it happens to
|
||||
* not account for the in-service entity in case the latter is
|
||||
* removed from its active tree (which may get done after
|
||||
* invoking the function bfq_no_longer_next_in_service in
|
||||
* bfq_get_next_queue). Fortunately, here, i.e., while
|
||||
* bfq_no_longer_next_in_service is not yet completed in
|
||||
* bfq_get_next_queue, bfq_active_extract has not yet been
|
||||
* invoked, and thus active_entities still coincides with the
|
||||
* actual number of active entities.
|
||||
*/
|
||||
if (bfqg->active_entities == 1)
|
||||
return true;
|
||||
|
||||
|
@ -954,7 +968,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
|
|||
* one of its children receives a new request.
|
||||
*
|
||||
* Basically, this function updates the timestamps of entity and
|
||||
* inserts entity into its active tree, ater possible extracting it
|
||||
* inserts entity into its active tree, ater possibly extracting it
|
||||
* from its idle tree.
|
||||
*/
|
||||
static void __bfq_activate_entity(struct bfq_entity *entity,
|
||||
|
@ -1048,7 +1062,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
|
|||
entity->start = entity->finish;
|
||||
/*
|
||||
* In addition, if the entity had more than one child
|
||||
* when set in service, then was not extracted from
|
||||
* when set in service, then it was not extracted from
|
||||
* the active tree. This implies that the position of
|
||||
* the entity in the active tree may need to be
|
||||
* changed now, because we have just updated the start
|
||||
|
@ -1056,9 +1070,8 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
|
|||
* time in a moment (the requeueing is then, more
|
||||
* precisely, a repositioning in this case). To
|
||||
* implement this repositioning, we: 1) dequeue the
|
||||
* entity here, 2) update the finish time and
|
||||
* requeue the entity according to the new
|
||||
* timestamps below.
|
||||
* entity here, 2) update the finish time and requeue
|
||||
* the entity according to the new timestamps below.
|
||||
*/
|
||||
if (entity->tree)
|
||||
bfq_active_extract(st, entity);
|
||||
|
@ -1105,9 +1118,10 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
|
|||
|
||||
|
||||
/**
|
||||
* bfq_activate_entity - activate or requeue an entity representing a bfq_queue,
|
||||
* and activate, requeue or reposition all ancestors
|
||||
* for which such an update becomes necessary.
|
||||
* bfq_activate_requeue_entity - activate or requeue an entity representing a
|
||||
* bfq_queue, and activate, requeue or reposition
|
||||
* all ancestors for which such an update becomes
|
||||
* necessary.
|
||||
* @entity: the entity to activate.
|
||||
* @non_blocking_wait_rq: true if this entity was waiting for a request
|
||||
* @requeue: true if this is a requeue, which implies that bfqq is
|
||||
|
@ -1135,9 +1149,9 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
|
|||
* @ins_into_idle_tree: if false, the entity will not be put into the
|
||||
* idle tree.
|
||||
*
|
||||
* Deactivates an entity, independently from its previous state. Must
|
||||
* Deactivates an entity, independently of its previous state. Must
|
||||
* be invoked only if entity is on a service tree. Extracts the entity
|
||||
* from that tree, and if necessary and allowed, puts it on the idle
|
||||
* from that tree, and if necessary and allowed, puts it into the idle
|
||||
* tree.
|
||||
*/
|
||||
bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
|
||||
|
@ -1158,8 +1172,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
|
|||
st = bfq_entity_service_tree(entity);
|
||||
is_in_service = entity == sd->in_service_entity;
|
||||
|
||||
if (is_in_service)
|
||||
if (is_in_service) {
|
||||
bfq_calc_finish(entity, entity->service);
|
||||
sd->in_service_entity = NULL;
|
||||
}
|
||||
|
||||
if (entity->tree == &st->active)
|
||||
bfq_active_extract(st, entity);
|
||||
|
@ -1177,7 +1193,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
|
|||
/**
|
||||
* bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
|
||||
* @entity: the entity to deactivate.
|
||||
* @ins_into_idle_tree: true if the entity can be put on the idle tree
|
||||
* @ins_into_idle_tree: true if the entity can be put into the idle tree
|
||||
*/
|
||||
static void bfq_deactivate_entity(struct bfq_entity *entity,
|
||||
bool ins_into_idle_tree,
|
||||
|
@ -1208,16 +1224,29 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
|
|||
*/
|
||||
bfq_update_next_in_service(sd, NULL);
|
||||
|
||||
if (sd->next_in_service)
|
||||
if (sd->next_in_service || sd->in_service_entity) {
|
||||
/*
|
||||
* The parent entity is still backlogged,
|
||||
* because next_in_service is not NULL. So, no
|
||||
* further upwards deactivation must be
|
||||
* performed. Yet, next_in_service has
|
||||
* changed. Then the schedule does need to be
|
||||
* updated upwards.
|
||||
* The parent entity is still active, because
|
||||
* either next_in_service or in_service_entity
|
||||
* is not NULL. So, no further upwards
|
||||
* deactivation must be performed. Yet,
|
||||
* next_in_service has changed. Then the
|
||||
* schedule does need to be updated upwards.
|
||||
*
|
||||
* NOTE If in_service_entity is not NULL, then
|
||||
* next_in_service may happen to be NULL,
|
||||
* although the parent entity is evidently
|
||||
* active. This happens if 1) the entity
|
||||
* pointed by in_service_entity is the only
|
||||
* active entity in the parent entity, and 2)
|
||||
* according to the definition of
|
||||
* next_in_service, the in_service_entity
|
||||
* cannot be considered as
|
||||
* next_in_service. See the comments on the
|
||||
* definition of next_in_service for details.
|
||||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we get here, then the parent is no more
|
||||
|
@ -1494,47 +1523,34 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
|
|||
|
||||
/*
|
||||
* If entity is no longer a candidate for next
|
||||
* service, then we extract it from its active tree,
|
||||
* for the following reason. To further boost the
|
||||
* throughput in some special case, BFQ needs to know
|
||||
* which is the next candidate entity to serve, while
|
||||
* there is already an entity in service. In this
|
||||
* respect, to make it easy to compute/update the next
|
||||
* candidate entity to serve after the current
|
||||
* candidate has been set in service, there is a case
|
||||
* where it is necessary to extract the current
|
||||
* candidate from its service tree. Such a case is
|
||||
* when the entity just set in service cannot be also
|
||||
* a candidate for next service. Details about when
|
||||
* this conditions holds are reported in the comments
|
||||
* on the function bfq_no_longer_next_in_service()
|
||||
* invoked below.
|
||||
* service, then it must be extracted from its active
|
||||
* tree, so as to make sure that it won't be
|
||||
* considered when computing next_in_service. See the
|
||||
* comments on the function
|
||||
* bfq_no_longer_next_in_service() for details.
|
||||
*/
|
||||
if (bfq_no_longer_next_in_service(entity))
|
||||
bfq_active_extract(bfq_entity_service_tree(entity),
|
||||
entity);
|
||||
|
||||
/*
|
||||
* For the same reason why we may have just extracted
|
||||
* entity from its active tree, we may need to update
|
||||
* next_in_service for the sched_data of entity too,
|
||||
* regardless of whether entity has been extracted.
|
||||
* In fact, even if entity has not been extracted, a
|
||||
* descendant entity may get extracted. Such an event
|
||||
* would cause a change in next_in_service for the
|
||||
* level of the descendant entity, and thus possibly
|
||||
* back to upper levels.
|
||||
* Even if entity is not to be extracted according to
|
||||
* the above check, a descendant entity may get
|
||||
* extracted in one of the next iterations of this
|
||||
* loop. Such an event could cause a change in
|
||||
* next_in_service for the level of the descendant
|
||||
* entity, and thus possibly back to this level.
|
||||
*
|
||||
* We cannot perform the resulting needed update
|
||||
* before the end of this loop, because, to know which
|
||||
* is the correct next-to-serve candidate entity for
|
||||
* each level, we need first to find the leaf entity
|
||||
* to set in service. In fact, only after we know
|
||||
* which is the next-to-serve leaf entity, we can
|
||||
* discover whether the parent entity of the leaf
|
||||
* entity becomes the next-to-serve, and so on.
|
||||
* However, we cannot perform the resulting needed
|
||||
* update of next_in_service for this level before the
|
||||
* end of the whole loop, because, to know which is
|
||||
* the correct next-to-serve candidate entity for each
|
||||
* level, we need first to find the leaf entity to set
|
||||
* in service. In fact, only after we know which is
|
||||
* the next-to-serve leaf entity, we can discover
|
||||
* whether the parent entity of the leaf entity
|
||||
* becomes the next-to-serve, and so on.
|
||||
*/
|
||||
|
||||
}
|
||||
|
||||
bfqq = bfq_entity_to_bfqq(entity);
|
||||
|
|
|
@ -387,9 +387,11 @@ static void bio_integrity_verify_fn(struct work_struct *work)
|
|||
*/
|
||||
bool __bio_integrity_endio(struct bio *bio)
|
||||
{
|
||||
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status) {
|
||||
struct bio_integrity_payload *bip = bio_integrity(bio);
|
||||
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
|
||||
struct bio_integrity_payload *bip = bio_integrity(bio);
|
||||
|
||||
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
|
||||
(bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
|
||||
INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
|
||||
queue_work(kintegrityd_wq, &bip->bip_work);
|
||||
return false;
|
||||
|
|
|
@@ -36,12 +36,18 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
 	for (queue = 0; queue < set->nr_hw_queues; queue++) {
 		mask = pci_irq_get_affinity(pdev, queue);
 		if (!mask)
-			return -EINVAL;
+			goto fallback;

 		for_each_cpu(cpu, mask)
 			set->mq_map[cpu] = queue;
 	}

 	return 0;
+
+fallback:
+	WARN_ON_ONCE(set->nr_hw_queues > 1);
+	for_each_possible_cpu(cpu)
+		set->mq_map[cpu] = 0;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);

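Drivers reach this helper through their ->map_queues callback; a minimal caller sketch (hypothetical foo_* names, not from this series):

static int foo_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_dev *fdev = set->driver_data;	/* hypothetical driver data */

	/* With the fallback above, a device that reports no IRQ affinity no
	 * longer makes queue setup fail; every CPU just maps to queue 0. */
	return blk_mq_pci_map_queues(set, fdev->pdev);
}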
@@ -301,11 +301,12 @@ static struct request *blk_mq_get_request(struct request_queue *q,
	struct elevator_queue *e = q->elevator;
	struct request *rq;
	unsigned int tag;
	struct blk_mq_ctx *local_ctx = NULL;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx))
		data->ctx = blk_mq_get_ctx(q);
		data->ctx = local_ctx = blk_mq_get_ctx(q);
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
	if (op & REQ_NOWAIT)
@@ -324,6 +325,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
		if (local_ctx) {
			blk_mq_put_ctx(local_ctx);
			data->ctx = NULL;
		}
		blk_queue_exit(q);
		return NULL;
	}
@@ -355,13 +360,13 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		return ERR_PTR(ret);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);

	blk_mq_put_ctx(alloc_data.ctx);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	blk_mq_put_ctx(alloc_data.ctx);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
@@ -406,7 +411,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);

	blk_queue_exit(q);

	if (!rq)
@@ -679,8 +683,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list);
void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_schedule_delayed_work(&q->requeue_work,
				      msecs_to_jiffies(msecs));
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

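The switch matters because kblockd_schedule_delayed_work() is a no-op when the work item is already pending, so a later call asking for a shorter delay would be ignored, while the _mod_ variant re-arms the timer. The same distinction, shown with the generic workqueue API in a minimal sketch (mydrv_dwork and mydrv_kick() are hypothetical, and the work item is assumed to have been set up with INIT_DELAYED_WORK() elsewhere):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work mydrv_dwork;	/* hypothetical, initialised elsewhere */

static void mydrv_kick(unsigned long msecs)
{
	/*
	 * queue_delayed_work() would do nothing if mydrv_dwork is already
	 * pending; mod_delayed_work() re-arms the timer, so the most
	 * recent delay always takes effect.
	 */
	mod_delayed_work(system_wq, &mydrv_dwork,
			 msecs_to_jiffies(msecs));
}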
@@ -16,6 +16,16 @@
 #include <linux/kernel.h>
 #include <linux/serial_core.h>

/*
 * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
 * occasionally getting stuck as 1. To avoid the potential for a hang, check
 * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
 * implementations, so only do so if an affected platform is detected in
 * parse_spcr().
 */
bool qdf2400_e44_present;
EXPORT_SYMBOL(qdf2400_e44_present);

/*
 * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
 * Detect them by examining the OEM fields in the SPCR header, similiar to PCI
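What the pl011 driver ends up doing with qdf2400_e44_present is, in effect, polling the flag register for TXFE instead of BUSY when waiting for the transmitter to go idle. A hypothetical helper sketching that difference (an illustration of the work-around's idea, not the amba-pl011.c code):

#include <linux/io.h>
#include <linux/amba/serial.h>
#include <asm/processor.h>

/* fr points at the memory-mapped PL011 flag register. */
static void wait_for_tx_done(void __iomem *fr, bool e44)
{
	if (e44) {
		/*
		 * Erratum 44: BUSY may stick at 1, so wait for the TX
		 * FIFO to drain (TXFE == 1) instead.
		 */
		while (!(readl(fr) & UART01x_FR_TXFE))
			cpu_relax();
	} else {
		while (readl(fr) & UART01x_FR_BUSY)
			cpu_relax();
	}
}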
@@ -147,8 +157,30 @@ int __init parse_spcr(bool earlycon)
		goto done;
	}

	if (qdf2400_erratum_44_present(&table->header))
		uart = "qdf2400_e44";
	/*
	 * If the E44 erratum is required, then we need to tell the pl011
	 * driver to implement the work-around.
	 *
	 * The global variable is used by the probe function when it
	 * creates the UARTs, whether or not they're used as a console.
	 *
	 * If the user specifies "traditional" earlycon, the qdf2400_e44
	 * console name matches the EARLYCON_DECLARE() statement, and
	 * SPCR is not used. Parameter "earlycon" is false.
	 *
	 * If the user specifies "SPCR" earlycon, then we need to update
	 * the console name so that it also says "qdf2400_e44". Parameter
	 * "earlycon" is true.
	 *
	 * For consistency, if we change the console name, then we do it
	 * for everyone, not just earlycon.
	 */
	if (qdf2400_erratum_44_present(&table->header)) {
		qdf2400_e44_present = true;
		if (earlycon)
			uart = "qdf2400_e44";
	}

	if (xgene_8250_erratum_present(table))
		iotype = "mmio32";
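The two ways of requesting the early console that the comment distinguishes look roughly like this on the kernel command line; the MMIO address is a placeholder and the exact option syntax may differ by platform:

	earlycon=qdf2400_e44,0xf7995000    ("traditional" earlycon; SPCR is not consulted)
	earlycon                           (SPCR earlycon; parse_spcr() switches the console
	                                    name to qdf2400_e44 on affected SoCs)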
Some files were not shown because too many files changed in this diff.