Merge branch 'x86/cpufeature' into x86/asm, to pick up dependency
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Commit
89a01c51cb
@@ -1,4 +1,4 @@
-What: state
+What: /sys/devices/system/ibm_rtl/state
 Date: Sep 2010
 KernelVersion: 2.6.37
 Contact: Vernon Mauery <vernux@us.ibm.com>

@@ -10,7 +10,7 @@ Description: The state file allows a means by which to change in and
 Users: The ibm-prtm userspace daemon uses this interface.

-What: version
+What: /sys/devices/system/ibm_rtl/version
 Date: Sep 2010
 KernelVersion: 2.6.37
 Contact: Vernon Mauery <vernux@us.ibm.com>

@@ -43,6 +43,9 @@ Optional properties:
   reset signal present internally in some host controller IC designs.
   See Documentation/devicetree/bindings/reset/reset.txt for details.

+* reset-names: request name for using "resets" property. Must be "reset".
+  (It will be used together with "resets" property.)
+
 * clocks: from common clock binding: handle to biu and ciu clocks for the
   bus interface unit clock and the card interface unit clock.

@@ -103,6 +106,8 @@ board specific portions as listed below.
 		interrupts = <0 75 0>;
 		#address-cells = <1>;
 		#size-cells = <0>;
+		resets = <&rst 20>;
+		reset-names = "reset";
 	};

 [board specific internal DMA resources]

@@ -26,13 +26,16 @@ Required properties:
 	- "sys"
 	- "legacy"
 	- "client"
-- resets: Must contain five entries for each entry in reset-names.
+- resets: Must contain seven entries for each entry in reset-names.
   See ../reset/reset.txt for details.
 - reset-names: Must include the following names
 	- "core"
 	- "mgmt"
 	- "mgmt-sticky"
 	- "pipe"
+	- "pm"
+	- "aclk"
+	- "pclk"
 - pinctrl-names : The pin control state names
 - pinctrl-0: The "default" pinctrl state
 - #interrupt-cells: specifies the number of cells needed to encode an

@@ -86,8 +89,10 @@ pcie0: pcie@f8000000 {
 	reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>;
 	reg-names = "axi-base", "apb-base";
 	resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
-		 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
-	reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+		 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+		 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>;
+	reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+		      "pm", "pclk", "aclk";
 	phys = <&pcie_phy>;
 	phy-names = "pcie-phy";
 	pinctrl-names = "default";

@@ -14,11 +14,6 @@ Required properies:
 - #size-cells : The value of this property must be 1
 - ranges : defines mapping between pin controller node (parent) to
   gpio-bank node (children).
-- interrupt-parent: phandle of the interrupt parent to which the external
-  GPIO interrupts are forwarded to.
-- st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
-  which includes IRQ mux selection register, and the offset of the IRQ mux
-  selection register.
 - pins-are-numbered: Specify the subnodes are using numbered pinmux to
   specify pins.

@@ -37,6 +32,11 @@ Required properties:

 Optional properties:
 - reset: : Reference to the reset controller
+- interrupt-parent: phandle of the interrupt parent to which the external
+  GPIO interrupts are forwarded to.
+- st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
+  which includes IRQ mux selection register, and the offset of the IRQ mux
+  selection register.

 Example:
 #include <dt-bindings/pinctrl/stm32f429-pinfunc.h>

@@ -447,7 +447,6 @@ prototypes:
 	int (*flush) (struct file *);
 	int (*release) (struct inode *, struct file *);
 	int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
-	int (*aio_fsync) (struct kiocb *, int datasync);
 	int (*fasync) (int, struct file *, int);
 	int (*lock) (struct file *, int, struct file_lock *);
 	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,

@@ -828,7 +828,6 @@ struct file_operations {
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*release) (struct inode *, struct file *);
 	int (*fsync) (struct file *, loff_t, loff_t, int datasync);
-	int (*aio_fsync) (struct kiocb *, int datasync);
 	int (*fasync) (int, struct file *, int);
 	int (*lock) (struct file *, int, struct file_lock *);
 	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);

@@ -4,7 +4,17 @@ KVM Lock Overview
 1. Acquisition Orders
 ---------------------

-(to be written)
+The acquisition orders for mutexes are as follows:
+
+- kvm->lock is taken outside vcpu->mutex
+
+- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
+
+- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
+  them together is quite rare.
+
+For spinlocks, kvm_lock is taken outside kvm->mmu_lock. Everything
+else is a leaf: no other lock is taken inside the critical sections.

 2: Exception
 ------------
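A minimal C sketch of the mutex ordering documented above (the helper
function is hypothetical, shown only to illustrate the nesting):

	/* Illustrative only: kvm->lock strictly outside vcpu->mutex. */
	static void example_vcpu_op(struct kvm *kvm, struct kvm_vcpu *vcpu)
	{
		mutex_lock(&kvm->lock);		/* outer mutex first */
		mutex_lock(&vcpu->mutex);	/* then the per-vCPU mutex */
		/* ... operate on the vCPU ... */
		mutex_unlock(&vcpu->mutex);	/* release in reverse order */
		mutex_unlock(&kvm->lock);
	}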
MAINTAINERS
@@ -7925,6 +7925,10 @@ F: mm/
 MEMORY TECHNOLOGY DEVICES (MTD)
 M:	David Woodhouse <dwmw2@infradead.org>
 M:	Brian Norris <computersforpeace@gmail.com>
+M:	Boris Brezillon <boris.brezillon@free-electrons.com>
+M:	Marek Vasut <marek.vasut@gmail.com>
+M:	Richard Weinberger <richard@nod.at>
+M:	Cyrille Pitchen <cyrille.pitchen@atmel.com>
 L:	linux-mtd@lists.infradead.org
 W:	http://www.linux-mtd.infradead.org/
 Q:	http://patchwork.ozlabs.org/project/linux-mtd/list/

@@ -9331,7 +9335,7 @@ PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
 M:	Keith Busch <keith.busch@intel.com>
 L:	linux-pci@vger.kernel.org
 S:	Supported
-F:	arch/x86/pci/vmd.c
+F:	drivers/pci/host/vmd.c

 PCIE DRIVER FOR ST SPEAR13XX
 M:	Pratyush Anand <pratyush.anand@gmail.com>

@@ -11404,6 +11408,17 @@ W: http://www.st.com/spear
 S:	Maintained
 F:	drivers/clk/spear/

+SPI NOR SUBSYSTEM
+M:	Cyrille Pitchen <cyrille.pitchen@atmel.com>
+M:	Marek Vasut <marek.vasut@gmail.com>
+L:	linux-mtd@lists.infradead.org
+W:	http://www.linux-mtd.infradead.org/
+Q:	http://patchwork.ozlabs.org/project/linux-mtd/list/
+T:	git git://github.com/spi-nor/linux.git
+S:	Maintained
+F:	drivers/mtd/spi-nor/
+F:	include/linux/mtd/spi-nor.h
+
 SPI SUBSYSTEM
 M:	Mark Brown <broonie@kernel.org>
 L:	linux-spi@vger.kernel.org

@@ -12783,6 +12798,7 @@ F: include/uapi/linux/virtio_console.h

 VIRTIO CORE, NET AND BLOCK DRIVERS
 M:	"Michael S. Tsirkin" <mst@redhat.com>
 M:	Jason Wang <jasowang@redhat.com>
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
+F:	Documentation/devicetree/bindings/virtio/

@@ -12813,6 +12829,7 @@ F: include/uapi/linux/virtio_gpu.h

 VIRTIO HOST (VHOST)
 M:	"Michael S. Tsirkin" <mst@redhat.com>
 M:	Jason Wang <jasowang@redhat.com>
 L:	kvm@vger.kernel.org
 L:	virtualization@lists.linux-foundation.org
+L:	netdev@vger.kernel.org

Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Psychotic Stoned Sheep

 # *DOCUMENTATION*

@@ -370,7 +370,7 @@ LDFLAGS_MODULE =
 CFLAGS_KERNEL	=
 AFLAGS_KERNEL	=
 LDFLAGS_vmlinux =
-CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im
+CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
 CFLAGS_KCOV	:= $(call cc-option,-fsanitize-coverage=trace-pc,)

@@ -620,7 +620,6 @@ ARCH_CFLAGS :=
 include arch/$(SRCARCH)/Makefile

 KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
 KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)

 ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION

@@ -629,15 +628,18 @@ KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
 endif

 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS	+= -Os
+KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,)
 else
 ifdef CONFIG_PROFILE_ALL_BRANCHES
-KBUILD_CFLAGS	+= -O2
+KBUILD_CFLAGS	+= -O2 $(call cc-disable-warning,maybe-uninitialized,)
 else
 KBUILD_CFLAGS	+= -O2
 endif
 endif

 KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
			$(call cc-disable-warning,maybe-uninitialized,))

 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)

@@ -50,6 +50,9 @@ atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)

 cflags-$(atleast_gcc44)			+= -fsection-anchors

+cflags-$(CONFIG_ARC_HAS_LLSC)		+= -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE)		+= -mswape
+
 ifdef CONFIG_ISA_ARCV2

 ifndef CONFIG_ARC_HAS_LL64

@@ -68,7 +71,9 @@ cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi)
 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
 # Generic build system uses -O2, we want -O3
 # Note: No need to add to cflags-y as that happens anyways
-ARCH_CFLAGS += -O3
+#
+# Disable the false maybe-uninitialized warings gcc spits out at -O3
+ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,)
 endif

 # small data is default for elf32 tool-chain. If not usable, disable it

@@ -71,7 +71,7 @@
 		reg-io-width = <4>;
 	};

-	arcpmu0: pmu {
+	arcpct0: pct {
 		compatible = "snps,arc700-pct";
 	};
 };

@@ -69,7 +69,7 @@
 		};
 	};

-	arcpmu0: pmu {
+	arcpct0: pct {
 		compatible = "snps,arc700-pct";
 	};
 };

@@ -83,5 +83,9 @@
 			reg = <0xf0003000 0x44>;
 			interrupts = <7>;
 		};
+
+		arcpct0: pct {
+			compatible = "snps,arc700-pct";
+		};
 	};
 };

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y

@@ -12,6 +12,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y

@@ -10,6 +10,7 @@ CONFIG_IKCONFIG_PROC=y
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_MODULES=y

@@ -34,7 +35,6 @@ CONFIG_INET=y
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y

@@ -72,7 +72,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HWMON is not set
 CONFIG_DRM=y
 CONFIG_DRM_ARCPGU=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set

@@ -43,12 +43,14 @@
 #define STATUS_AE_BIT		5	/* Exception active */
 #define STATUS_DE_BIT		6	/* PC is in delay slot */
 #define STATUS_U_BIT		7	/* User/Kernel mode */
+#define STATUS_Z_BIT		11
 #define STATUS_L_BIT		12	/* Loop inhibit */

 /* These masks correspond to the status word(STATUS_32) bits */
 #define STATUS_AE_MASK		(1<<STATUS_AE_BIT)
 #define STATUS_DE_MASK		(1<<STATUS_DE_BIT)
 #define STATUS_U_MASK		(1<<STATUS_U_BIT)
+#define STATUS_Z_MASK		(1<<STATUS_Z_BIT)
 #define STATUS_L_MASK		(1<<STATUS_L_BIT)

 /*

@@ -37,9 +37,9 @@ extern const char *arc_platform_smp_cpuinfo(void);
  * API expected BY platform smp code (FROM arch smp code)
  *
  * smp_ipi_irq_setup:
- *	Takes @cpu and @irq to which the arch-common ISR is hooked up
+ *	Takes @cpu and @hwirq to which the arch-common ISR is hooked up
  */
-extern int smp_ipi_irq_setup(int cpu, int irq);
+extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);

 /*
  * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP

@@ -31,6 +31,8 @@ static void __init arc_set_early_base_baud(unsigned long dt_root)
 		arc_base_baud = 166666666;	/* Fixed 166.6MHz clk (TB10x) */
 	else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
 		arc_base_baud = 33333333;	/* Fixed 33MHz clk (AXS10x) */
+	else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps"))
+		arc_base_baud = 800000000;	/* Fixed 800MHz clk (NPS) */
 	else
 		arc_base_baud = 50000000;	/* Fixed default 50MHz */
 }

@@ -181,6 +181,8 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 {
 	unsigned long flags;
 	cpumask_t online;
+	unsigned int destination_bits;
+	unsigned int distribution_mode;

 	/* errout if no online cpu per @cpumask */
 	if (!cpumask_and(&online, cpumask, cpu_online_mask))

@@ -188,8 +190,15 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,

 	raw_spin_lock_irqsave(&mcip_lock, flags);

-	idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
-	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+	destination_bits = cpumask_bits(&online)[0];
+	idu_set_dest(data->hwirq, destination_bits);
+
+	if (ffs(destination_bits) == fls(destination_bits))
+		distribution_mode = IDU_M_DISTRI_DEST;
+	else
+		distribution_mode = IDU_M_DISTRI_RR;
+
+	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);

 	raw_spin_unlock_irqrestore(&mcip_lock, flags);

@@ -207,16 +216,15 @@ static struct irq_chip idu_irq_chip = {

 };

-static int idu_first_irq;
+static irq_hw_number_t idu_first_hwirq;

 static void idu_cascade_isr(struct irq_desc *desc)
 {
-	struct irq_domain *domain = irq_desc_get_handler_data(desc);
-	unsigned int core_irq = irq_desc_get_irq(desc);
-	unsigned int idu_irq;
+	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
+	irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;

-	idu_irq = core_irq - idu_first_irq;
-	generic_handle_irq(irq_find_mapping(domain, idu_irq));
+	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
 }

 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)

@@ -282,7 +290,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
 	struct irq_domain *domain;
 	/* Read IDU BCR to confirm nr_irqs */
 	int nr_irqs = of_irq_count(intc);
-	int i, irq;
+	int i, virq;
 	struct mcip_bcr mp;

 	READ_BCR(ARC_REG_MCIP_BCR, mp);

@@ -303,11 +311,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
 	 * however we need it to get the parent virq and set IDU handler
 	 * as first level isr
 	 */
-		irq = irq_of_parse_and_map(intc, i);
+		virq = irq_of_parse_and_map(intc, i);
 		if (!i)
-			idu_first_irq = irq;
+			idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq));

-		irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
+		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
 	}

 	__mcip_cmd(CMD_IDU_ENABLE, 0);

@@ -43,8 +43,8 @@ SYSCALL_DEFINE0(arc_gettls)

 SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 {
-	int uval;
-	int ret;
+	struct pt_regs *regs = current_pt_regs();
+	int uval = -EFAULT;

 	/*
 	 * This is only for old cores lacking LLOCK/SCOND, which by defintion

@@ -54,24 +54,26 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 	 */
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));

+	/* Z indicates to userspace if operation succeded */
+	regs->status32 &= ~STATUS_Z_MASK;
+
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;

 	preempt_disable();

-	ret = __get_user(uval, uaddr);
-	if (ret)
+	if (__get_user(uval, uaddr))
 		goto done;

-	if (uval != expected)
-		ret = -EAGAIN;
-	else
-		ret = __put_user(new, uaddr);
+	if (uval == expected) {
+		if (!__put_user(new, uaddr))
+			regs->status32 |= STATUS_Z_MASK;
+	}

 done:
 	preempt_enable();

-	return ret;
+	return uval;
 }

 void arch_cpu_idle(void)

@@ -22,6 +22,7 @@
 #include <linux/atomic.h>
 #include <linux/cpumask.h>
 #include <linux/reboot.h>
+#include <linux/irqdomain.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
 #include <asm/mach_desc.h>

@@ -67,11 +68,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	int i;

 	/*
-	 * Initialise the present map, which describes the set of CPUs
-	 * actually populated at the present time.
+	 * if platform didn't set the present map already, do it now
+	 * boot cpu is set to present already by init/main.c
 	 */
-	for (i = 0; i < max_cpus; i++)
-		set_cpu_present(i, true);
+	if (num_present_cpus() <= 1) {
+		for (i = 0; i < max_cpus; i++)
+			set_cpu_present(i, true);
+	}
 }

 void __init smp_cpus_done(unsigned int max_cpus)

@@ -351,20 +354,24 @@ irqreturn_t do_IPI(int irq, void *dev_id)
  */
 static DEFINE_PER_CPU(int, ipi_dev);

-int smp_ipi_irq_setup(int cpu, int irq)
+int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
 {
 	int *dev = per_cpu_ptr(&ipi_dev, cpu);
+	unsigned int virq = irq_find_mapping(NULL, hwirq);
+
+	if (!virq)
+		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

 	/* Boot cpu calls request, all call enable */
 	if (!cpu) {
 		int rc;

-		rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
 		if (rc)
-			panic("Percpu IRQ request failed for %d\n", irq);
+			panic("Percpu IRQ request failed for %u\n", virq);
 	}

-	enable_percpu_irq(irq, 0);
+	enable_percpu_irq(virq, 0);

 	return 0;
 }

@@ -152,14 +152,17 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
 		cycle_t full;
 	} stamp;

-	__asm__ __volatile(
-	"1:				\n"
-	"	lr	%0, [AUX_RTC_LOW]	\n"
-	"	lr	%1, [AUX_RTC_HIGH]	\n"
-	"	lr	%2, [AUX_RTC_CTRL]	\n"
-	"	bbit0.nt %2, 31, 1b	\n"
-	: "=r" (stamp.low), "=r" (stamp.high), "=r" (status));
+	/*
+	 * hardware has an internal state machine which tracks readout of
+	 * low/high and updates the CTRL.status if
+	 *  - interrupt/exception taken between the two reads
+	 *  - high increments after low has been read
+	 */
+	do {
+		stamp.low = read_aux_reg(AUX_RTC_LOW);
+		stamp.high = read_aux_reg(AUX_RTC_HIGH);
+		status = read_aux_reg(AUX_RTC_CTRL);
+	} while (!(status & _BITUL(31)));

 	return stamp.full;
 }

@@ -105,6 +105,31 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 	__free_pages(page, get_order(size));
 }

+static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			unsigned long attrs)
+{
+	unsigned long user_count = vma_pages(vma);
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+	unsigned long off = vma->vm_pgoff;
+	int ret = -ENXIO;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
 /*
  * streaming DMA Mapping API...
  * CPU accesses page via normal paddr, thus needs to explicitly made

@@ -193,6 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
 struct dma_map_ops arc_dma_ops = {
 	.alloc			= arc_dma_alloc,
 	.free			= arc_dma_free,
+	.mmap			= arc_dma_mmap,
 	.map_page		= arc_dma_map_page,
 	.map_sg			= arc_dma_map_sg,
 	.sync_single_for_device	= arc_dma_sync_single_for_device,

@@ -140,16 +140,10 @@ static void eznps_init_per_cpu(int cpu)
 	mtm_enable_core(cpu);
 }

-static void eznps_ipi_clear(int irq)
-{
-	write_aux_reg(CTOP_AUX_IACK, 1 << irq);
-}
-
 struct plat_smp_ops plat_smp_ops = {
 	.info		= smp_cpuinfo_buf,
 	.init_early_smp	= eznps_init_cpumasks,
 	.cpu_kick	= eznps_smp_wakeup_cpu,
 	.ipi_send	= eznps_ipi_send,
 	.init_per_cpu	= eznps_init_per_cpu,
-	.ipi_clear	= eznps_ipi_clear,
 };

@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);

 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

@@ -57,6 +57,9 @@ struct kvm_arch {
 	/* VTTBR value associated with below pgd and vmid */
 	u64	vttbr;

+	/* The last vcpu id that ran on each physical CPU */
+	int __percpu *last_vcpu_ran;
+
 	/* Timer */
 	struct arch_timer_kvm timer;

@@ -71,6 +71,7 @@
 #define ICIALLUIS	__ACCESS_CP15(c7, 0, c1, 0)
 #define ATS1CPR		__ACCESS_CP15(c7, 0, c8, 0)
 #define TLBIALLIS	__ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALL		__ACCESS_CP15(c8, 0, c7, 0)
 #define TLBIALLNSNHIS	__ACCESS_CP15(c8, 4, c3, 4)
 #define PRRR		__ACCESS_CP15(c10, 0, c2, 0)
 #define NMRR		__ACCESS_CP15(c10, 0, c2, 1)

@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	int ret = 0;
+	int ret, cpu;

 	if (type)
 		return -EINVAL;

+	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+	if (!kvm->arch.last_vcpu_ran)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+
 	ret = kvm_alloc_stage2_pgd(kvm);
 	if (ret)
 		goto out_fail_alloc;

@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 out_free_stage2_pgd:
 	kvm_free_stage2_pgd(kvm);
 out_fail_alloc:
+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
 	return ret;
 }

@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	int i;

+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
+
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
 			kvm_arch_vcpu_free(kvm->vcpus[i]);

@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)

 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	int *last_ran;
+
+	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+
+	/*
+	 * We might get preempted before the vCPU actually runs, but
+	 * over-invalidation doesn't affect correctness.
+	 */
+	if (*last_ran != vcpu->vcpu_id) {
+		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+		*last_ran = vcpu->vcpu_id;
+	}
+
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);

@@ -55,6 +55,21 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	__kvm_tlb_flush_vmid(kvm);
 }

+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+	/* Switch to requested VMID */
+	write_sysreg(kvm->arch.vttbr, VTTBR);
+	isb();
+
+	write_sysreg(0, TLBIALL);
+	dsb(nsh);
+	isb();
+
+	write_sysreg(0, VTTBR);
+}
+
 void __hyp_text __kvm_flush_vm_context(void)
 {
 	write_sysreg(0, TLBIALLNSNHIS);

@@ -300,8 +300,11 @@
 		ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
 			  0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
 		resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
-			 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
-		reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+			 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+			 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
+			 <&cru SRST_A_PCIE>;
+		reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+			      "pm", "pclk", "aclk";
 		status = "disabled";

 		pcie0_intc: interrupt-controller {

@@ -1,7 +1,7 @@
 #ifndef __ASM_ALTERNATIVE_H
 #define __ASM_ALTERNATIVE_H

-#include <asm/cpufeature.h>
+#include <asm/cpucaps.h>
 #include <asm/insn.h>

 #ifndef __ASSEMBLY__

@@ -0,0 +1,40 @@
+/*
+ * arch/arm64/include/asm/cpucaps.h
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPUCAPS_H
+#define __ASM_CPUCAPS_H
+
+#define ARM64_WORKAROUND_CLEAN_CACHE		0
+#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
+#define ARM64_WORKAROUND_845719			2
+#define ARM64_HAS_SYSREG_GIC_CPUIF		3
+#define ARM64_HAS_PAN				4
+#define ARM64_HAS_LSE_ATOMICS			5
+#define ARM64_WORKAROUND_CAVIUM_23154		6
+#define ARM64_WORKAROUND_834220			7
+#define ARM64_HAS_NO_HW_PREFETCH		8
+#define ARM64_HAS_UAO				9
+#define ARM64_ALT_PAN_NOT_UAO			10
+#define ARM64_HAS_VIRT_HOST_EXTN		11
+#define ARM64_WORKAROUND_CAVIUM_27456		12
+#define ARM64_HAS_32BIT_EL0			13
+#define ARM64_HYP_OFFSET_LOW			14
+#define ARM64_MISMATCHED_CACHE_LINE_SIZE	15
+
+#define ARM64_NCAPS				16
+
+#endif /* __ASM_CPUCAPS_H */

@@ -11,6 +11,7 @@

 #include <linux/jump_label.h>

+#include <asm/cpucaps.h>
 #include <asm/hwcap.h>
 #include <asm/sysreg.h>

@@ -24,25 +25,6 @@
 #define MAX_CPU_FEATURES	(8 * sizeof(elf_hwcap))
 #define cpu_feature(x)		ilog2(HWCAP_ ## x)

-#define ARM64_WORKAROUND_CLEAN_CACHE		0
-#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
-#define ARM64_WORKAROUND_845719			2
-#define ARM64_HAS_SYSREG_GIC_CPUIF		3
-#define ARM64_HAS_PAN				4
-#define ARM64_HAS_LSE_ATOMICS			5
-#define ARM64_WORKAROUND_CAVIUM_23154		6
-#define ARM64_WORKAROUND_834220			7
-#define ARM64_HAS_NO_HW_PREFETCH		8
-#define ARM64_HAS_UAO				9
-#define ARM64_ALT_PAN_NOT_UAO			10
-#define ARM64_HAS_VIRT_HOST_EXTN		11
-#define ARM64_WORKAROUND_CAVIUM_27456		12
-#define ARM64_HAS_32BIT_EL0			13
-#define ARM64_HYP_OFFSET_LOW			14
-#define ARM64_MISMATCHED_CACHE_LINE_SIZE	15
-
-#define ARM64_NCAPS				16
-
 #ifndef __ASSEMBLY__

 #include <linux/kernel.h>

@@ -54,6 +54,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);

 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

@@ -62,6 +62,9 @@ struct kvm_arch {
 	/* VTTBR value associated with above pgd and vmid */
 	u64	vttbr;

+	/* The last vcpu id that ran on each physical CPU */
+	int __percpu *last_vcpu_ran;
+
 	/* The maximum number of vCPUs depends on the used GIC model */
 	int max_vcpus;

@@ -128,7 +128,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
 	return v;
 }

-#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
+#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))

 /*
  * We currently only support a 40bit IPA.

@@ -5,7 +5,6 @@

 #include <linux/stringify.h>
 #include <asm/alternative.h>
-#include <asm/cpufeature.h>

 #ifdef __ASSEMBLER__

@@ -64,6 +64,21 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 	write_sysreg(0, vttbr_el2);
 }

+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+	/* Switch to requested VMID */
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	isb();
+
+	asm volatile("tlbi vmalle1" : : );
+	dsb(nsh);
+	isb();
+
+	write_sysreg(0, vttbr_el2);
+}
+
 void __hyp_text __kvm_flush_vm_context(void)
 {
 	dsb(ishst);

@@ -263,7 +263,7 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)

 bootvars-y	= VMLINUX_LOAD_ADDRESS=$(load-y) \
 		  VMLINUX_ENTRY_ADDRESS=$(entry-y) \
-		  PLATFORM=$(platform-y)
+		  PLATFORM="$(platform-y)"
 ifdef CONFIG_32BIT
 bootvars-y	+= ADDR_BITS=32
 endif

@@ -84,12 +84,13 @@
 	fpga_regs: system-controller@1f000000 {
 		compatible = "mti,malta-fpga", "syscon", "simple-mfd";
 		reg = <0x1f000000 0x1000>;
+		native-endian;

 		reboot {
 			compatible = "syscon-reboot";
 			regmap = <&fpga_regs>;
 			offset = <0x500>;
-			mask = <0x4d>;
+			mask = <0x42>;
 		};
 	};

@@ -29,10 +29,20 @@ static __initdata const struct mips_machine *mach;
 static __initdata const void *mach_match_data;

 void __init prom_init(void)
 {
+	plat_get_fdt();
+	BUG_ON(!fdt);
+}
+
+void __init *plat_get_fdt(void)
+{
 	const struct mips_machine *check_mach;
 	const struct of_device_id *match;

+	if (fdt)
+		/* Already set up */
+		return (void *)fdt;
+
 	if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
 		/*
 		 * We booted using the UHI boot protocol, so we have been

@@ -75,12 +85,6 @@ void __init prom_init(void)
 		/* Retrieve the machine's FDT */
 		fdt = mach->fdt;
 	}

-	BUG_ON(!fdt);
-}
-
-void __init *plat_get_fdt(void)
-{
 	return (void *)fdt;
 }

@@ -63,6 +63,8 @@ do { \
 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
 				    struct mips_fpu_struct *ctx, int has_fpu,
 				    void *__user *fault_addr);
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+		     struct task_struct *tsk);
 int process_fpemu_return(int sig, void __user *fault_addr,
 			 unsigned long fcr31);
 int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,

@@ -81,4 +83,15 @@ static inline void fpu_emulator_init_fpu(void)
 		set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
 }

+/*
+ * Mask the FCSR Cause bits according to the Enable bits, observing
+ * that Unimplemented is always enabled.
+ */
+static inline unsigned long mask_fcr31_x(unsigned long fcr31)
+{
+	return fcr31 & (FPU_CSR_UNI_X |
+			((fcr31 & FPU_CSR_ALL_E) <<
+			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
+}
+
 #endif /* _ASM_FPU_EMULATOR_H */
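For reference, assuming the standard MIPS FCSR layout (Enable bits 7-11,
Cause bits 12-17, with Unimplemented as the extra Cause bit 17), the shift
in the helper above works out as:

	FPU_CSR_ALL_E = 0x00000f80, FPU_CSR_ALL_X = 0x0003f000
	ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E) = 13 - 8 = 5

so each Enable bit, shifted left by 5, lines up with its Cause bit, and
FPU_CSR_UNI_X keeps the always-enabled Unimplemented Cause bit in the mask.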
@@ -293,7 +293,10 @@ struct kvm_vcpu_arch {
 	/* Host KSEG0 address of the EI/DI offset */
 	void *kseg0_commpage;

-	u32 io_gpr;		/* GPR used as IO source/target */
+	/* Resume PC after MMIO completion */
+	unsigned long io_pc;
+	/* GPR used as IO source/target */
+	u32 io_gpr;

 	struct hrtimer comparecount_timer;
 	/* Count timer control KVM register */

@@ -315,8 +318,6 @@ struct kvm_vcpu_arch {
 	/* Bitmask of pending exceptions to be cleared */
 	unsigned long pending_exceptions_clr;

-	u32 pending_load_cause;
-
 	/* Save/Restore the entryhi register when are are preempted/scheduled back in */
 	unsigned long preempt_entryhi;

@@ -75,6 +75,22 @@ do { if (cpu_has_rw_llb) { \
 	} \
 } while (0)

+/*
+ * Check FCSR for any unmasked exceptions pending set with `ptrace',
+ * clear them and send a signal.
+ */
+#define __sanitize_fcr31(next) \
+do { \
+	unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
+	void __user *pc; \
+ \
+	if (unlikely(fcr31)) { \
+		pc = (void __user *)task_pt_regs(next)->cp0_epc; \
+		next->thread.fpu.fcr31 &= ~fcr31; \
+		force_fcr31_sig(fcr31, pc, next); \
+	} \
+} while (0)
+
 /*
  * For newly created kernel threads switch_to() will return to
  * ret_from_kernel_thread, newly created user threads to ret_from_fork.

@@ -85,6 +101,8 @@ do { if (cpu_has_rw_llb) { \
 do { \
 	__mips_mt_fpaff_switch_to(prev); \
 	lose_fpu_inatomic(1, prev); \
+	if (tsk_used_math(next)) \
+		__sanitize_fcr31(next); \
 	if (cpu_has_dsp) { \
 		__save_dsp(prev); \
 		__restore_dsp(next); \

@@ -21,6 +21,11 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);

 static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);

+phys_addr_t __weak mips_cpc_default_phys_base(void)
+{
+	return 0;
+}
+
 /**
  * mips_cpc_phys_base - retrieve the physical base address of the CPC
  *

@@ -43,8 +48,12 @@ static phys_addr_t mips_cpc_phys_base(void)
 	if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
 		return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;

-	/* Otherwise, give it the default address & enable it */
+	/* Otherwise, use the default address */
 	cpc_base = mips_cpc_default_phys_base();
+	if (!cpc_base)
+		return cpc_base;
+
+	/* Enable the CPC, mapped at the default address */
 	write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
 	return cpc_base;
 }
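Because the new mips_cpc_default_phys_base() is declared __weak, a platform
can supply its own CPC base simply by defining a non-weak version; a minimal
sketch (the address below is purely hypothetical):

	/* Platform override of the __weak default; returns a fixed CPC base. */
	phys_addr_t mips_cpc_default_phys_base(void)
	{
		return 0x1bde0000;	/* hypothetical platform address */
	}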
@@ -899,7 +899,7 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
 * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
 * @regs:	Process register set
 * @inst:	Instruction to decode and emulate
- * @fcr31:	Floating Point Control and Status Register returned
+ * @fcr31:	Floating Point Control and Status Register Cause bits returned
 */
 int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
 {

@@ -1172,13 +1172,13 @@ fpu_emul:

 		err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
 					       &fault_addr);
-		*fcr31 = current->thread.fpu.fcr31;

 		/*
-		 * We can't allow the emulated instruction to leave any of
-		 * the cause bits set in $fcr31.
+		 * We can't allow the emulated instruction to leave any
+		 * enabled Cause bits set in $fcr31.
 		 */
-		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+		*fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
+		current->thread.fpu.fcr31 &= ~res;

 		/*
 		 * this is a tricky issue - lose_fpu() uses LL/SC atomics

@@ -79,16 +79,15 @@ void ptrace_disable(struct task_struct *child)
 }

 /*
- * Poke at FCSR according to its mask.  Don't set the cause bits as
- * this is currently not handled correctly in FP context restoration
- * and will cause an oops if a corresponding enable bit is set.
+ * Poke at FCSR according to its mask.  Set the Cause bits even
+ * if a corresponding Enable bit is set.  This will be noticed at
+ * the time the thread is switched to and SIGFPE thrown accordingly.
 */
 static void ptrace_setfcr31(struct task_struct *child, u32 value)
 {
 	u32 fcr31;
 	u32 mask;

-	value &= ~FPU_CSR_ALL_X;
 	fcr31 = child->thread.fpu.fcr31;
 	mask = boot_cpu_data.fpu_msk31;
 	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);

@@ -817,6 +816,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		break;
 #endif
 	case FPC_CSR:
+		init_fp_ctx(child);
 		ptrace_setfcr31(child, data);
 		break;
 	case DSP_BASE ... DSP_BASE + 5: {

@@ -19,108 +19,86 @@
 #include <asm/regdef.h>

 #define EX(a,b) \
 9:	a,##b; \
	.section __ex_table,"a"; \
	PTR	9b,fault; \
	.previous

+#define EX2(a,b) \
+9:	a,##b; \
+	.section __ex_table,"a"; \
+	PTR	9b,bad_stack; \
+	PTR	9b+4,bad_stack; \
+	.previous
+
	.set	noreorder
	.set	mips1
-	/* Save floating point context */

+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
 LEAF(_save_fp_context)
	.set	push
	SET_HARDFLOAT
	li	v0, 0				# assume success
-	cfc1	t1,fcr31
-	EX(swc1 $f0,(SC_FPREGS+0)(a0))
-	EX(swc1 $f1,(SC_FPREGS+8)(a0))
-	EX(swc1 $f2,(SC_FPREGS+16)(a0))
-	EX(swc1 $f3,(SC_FPREGS+24)(a0))
-	EX(swc1 $f4,(SC_FPREGS+32)(a0))
-	EX(swc1 $f5,(SC_FPREGS+40)(a0))
-	EX(swc1 $f6,(SC_FPREGS+48)(a0))
-	EX(swc1 $f7,(SC_FPREGS+56)(a0))
-	EX(swc1 $f8,(SC_FPREGS+64)(a0))
-	EX(swc1 $f9,(SC_FPREGS+72)(a0))
-	EX(swc1 $f10,(SC_FPREGS+80)(a0))
-	EX(swc1 $f11,(SC_FPREGS+88)(a0))
-	EX(swc1 $f12,(SC_FPREGS+96)(a0))
-	EX(swc1 $f13,(SC_FPREGS+104)(a0))
-	EX(swc1 $f14,(SC_FPREGS+112)(a0))
-	EX(swc1 $f15,(SC_FPREGS+120)(a0))
-	EX(swc1 $f16,(SC_FPREGS+128)(a0))
-	EX(swc1 $f17,(SC_FPREGS+136)(a0))
-	EX(swc1 $f18,(SC_FPREGS+144)(a0))
-	EX(swc1 $f19,(SC_FPREGS+152)(a0))
-	EX(swc1 $f20,(SC_FPREGS+160)(a0))
-	EX(swc1 $f21,(SC_FPREGS+168)(a0))
-	EX(swc1 $f22,(SC_FPREGS+176)(a0))
-	EX(swc1 $f23,(SC_FPREGS+184)(a0))
-	EX(swc1 $f24,(SC_FPREGS+192)(a0))
-	EX(swc1 $f25,(SC_FPREGS+200)(a0))
-	EX(swc1 $f26,(SC_FPREGS+208)(a0))
-	EX(swc1 $f27,(SC_FPREGS+216)(a0))
-	EX(swc1 $f28,(SC_FPREGS+224)(a0))
-	EX(swc1 $f29,(SC_FPREGS+232)(a0))
-	EX(swc1 $f30,(SC_FPREGS+240)(a0))
-	EX(swc1 $f31,(SC_FPREGS+248)(a0))
-	EX(sw t1,(SC_FPC_CSR)(a0))
-	cfc1	t0,$0				# implementation/version
+	cfc1	t1, fcr31
+	EX2(s.d $f0, 0(a0))
+	EX2(s.d $f2, 16(a0))
+	EX2(s.d $f4, 32(a0))
+	EX2(s.d $f6, 48(a0))
+	EX2(s.d $f8, 64(a0))
+	EX2(s.d $f10, 80(a0))
+	EX2(s.d $f12, 96(a0))
+	EX2(s.d $f14, 112(a0))
+	EX2(s.d $f16, 128(a0))
+	EX2(s.d $f18, 144(a0))
+	EX2(s.d $f20, 160(a0))
+	EX2(s.d $f22, 176(a0))
+	EX2(s.d $f24, 192(a0))
+	EX2(s.d $f26, 208(a0))
+	EX2(s.d $f28, 224(a0))
+	EX2(s.d $f30, 240(a0))
	jr	ra
+	 EX(sw t1, (a1))
	.set	pop
-	.set	nomacro
-	 EX(sw t0,(SC_FPC_EIR)(a0))
-	.set	macro
 END(_save_fp_context)

-/*
- * Restore FPU state:
- *  - fp gp registers
- *  - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
  *
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
  */
 LEAF(_restore_fp_context)
	.set	push
	SET_HARDFLOAT
	li	v0, 0				# assume success
-	EX(lw t0,(SC_FPC_CSR)(a0))
-	EX(lwc1 $f0,(SC_FPREGS+0)(a0))
-	EX(lwc1 $f1,(SC_FPREGS+8)(a0))
-	EX(lwc1 $f2,(SC_FPREGS+16)(a0))
-	EX(lwc1 $f3,(SC_FPREGS+24)(a0))
-	EX(lwc1 $f4,(SC_FPREGS+32)(a0))
-	EX(lwc1 $f5,(SC_FPREGS+40)(a0))
-	EX(lwc1 $f6,(SC_FPREGS+48)(a0))
-	EX(lwc1 $f7,(SC_FPREGS+56)(a0))
-	EX(lwc1 $f8,(SC_FPREGS+64)(a0))
-	EX(lwc1 $f9,(SC_FPREGS+72)(a0))
-	EX(lwc1 $f10,(SC_FPREGS+80)(a0))
-	EX(lwc1 $f11,(SC_FPREGS+88)(a0))
-	EX(lwc1 $f12,(SC_FPREGS+96)(a0))
-	EX(lwc1 $f13,(SC_FPREGS+104)(a0))
-	EX(lwc1 $f14,(SC_FPREGS+112)(a0))
-	EX(lwc1 $f15,(SC_FPREGS+120)(a0))
-	EX(lwc1 $f16,(SC_FPREGS+128)(a0))
-	EX(lwc1 $f17,(SC_FPREGS+136)(a0))
-	EX(lwc1 $f18,(SC_FPREGS+144)(a0))
-	EX(lwc1 $f19,(SC_FPREGS+152)(a0))
-	EX(lwc1 $f20,(SC_FPREGS+160)(a0))
-	EX(lwc1 $f21,(SC_FPREGS+168)(a0))
-	EX(lwc1 $f22,(SC_FPREGS+176)(a0))
-	EX(lwc1 $f23,(SC_FPREGS+184)(a0))
-	EX(lwc1 $f24,(SC_FPREGS+192)(a0))
-	EX(lwc1 $f25,(SC_FPREGS+200)(a0))
-	EX(lwc1 $f26,(SC_FPREGS+208)(a0))
-	EX(lwc1 $f27,(SC_FPREGS+216)(a0))
-	EX(lwc1 $f28,(SC_FPREGS+224)(a0))
-	EX(lwc1 $f29,(SC_FPREGS+232)(a0))
-	EX(lwc1 $f30,(SC_FPREGS+240)(a0))
-	EX(lwc1 $f31,(SC_FPREGS+248)(a0))
+	EX(lw t0, (a1))
+	EX2(l.d $f0, 0(a0))
+	EX2(l.d $f2, 16(a0))
+	EX2(l.d $f4, 32(a0))
+	EX2(l.d $f6, 48(a0))
+	EX2(l.d $f8, 64(a0))
+	EX2(l.d $f10, 80(a0))
+	EX2(l.d $f12, 96(a0))
+	EX2(l.d $f14, 112(a0))
+	EX2(l.d $f16, 128(a0))
+	EX2(l.d $f18, 144(a0))
+	EX2(l.d $f20, 160(a0))
+	EX2(l.d $f22, 176(a0))
+	EX2(l.d $f24, 192(a0))
+	EX2(l.d $f26, 208(a0))
+	EX2(l.d $f28, 224(a0))
+	EX2(l.d $f30, 240(a0))
	jr	ra
-	 ctc1	t0,fcr31
+	 ctc1	t0, fcr31
	.set	pop
 END(_restore_fp_context)
	.set	reorder

@@ -21,7 +21,14 @@
	.set	push
	SET_HARDFLOAT

-	/* Save floating point context */
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
 LEAF(_save_fp_context)
	mfc0	t0,CP0_STATUS
	sll	t0,t0,2

@@ -30,59 +37,59 @@

	cfc1	t1,fcr31
	/* Store the 16 double precision registers */
-	sdc1	$f0,(SC_FPREGS+0)(a0)
-	sdc1	$f2,(SC_FPREGS+16)(a0)
-	sdc1	$f4,(SC_FPREGS+32)(a0)
-	sdc1	$f6,(SC_FPREGS+48)(a0)
-	sdc1	$f8,(SC_FPREGS+64)(a0)
-	sdc1	$f10,(SC_FPREGS+80)(a0)
-	sdc1	$f12,(SC_FPREGS+96)(a0)
-	sdc1	$f14,(SC_FPREGS+112)(a0)
-	sdc1	$f16,(SC_FPREGS+128)(a0)
-	sdc1	$f18,(SC_FPREGS+144)(a0)
-	sdc1	$f20,(SC_FPREGS+160)(a0)
-	sdc1	$f22,(SC_FPREGS+176)(a0)
-	sdc1	$f24,(SC_FPREGS+192)(a0)
-	sdc1	$f26,(SC_FPREGS+208)(a0)
-	sdc1	$f28,(SC_FPREGS+224)(a0)
-	sdc1	$f30,(SC_FPREGS+240)(a0)
+	sdc1	$f0,0(a0)
+	sdc1	$f2,16(a0)
+	sdc1	$f4,32(a0)
+	sdc1	$f6,48(a0)
+	sdc1	$f8,64(a0)
+	sdc1	$f10,80(a0)
+	sdc1	$f12,96(a0)
+	sdc1	$f14,112(a0)
+	sdc1	$f16,128(a0)
+	sdc1	$f18,144(a0)
+	sdc1	$f20,160(a0)
+	sdc1	$f22,176(a0)
+	sdc1	$f24,192(a0)
+	sdc1	$f26,208(a0)
+	sdc1	$f28,224(a0)
+	sdc1	$f30,240(a0)
	jr	ra
-	 sw	t0,SC_FPC_CSR(a0)
+	 sw	t0,(a1)
 1:	jr	ra
	 nop
 END(_save_fp_context)

-/* Restore FPU state:
- *  - fp gp registers
- *  - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
  *
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
 */
 LEAF(_restore_fp_context)
	mfc0	t0,CP0_STATUS
	sll	t0,t0,2

	bgez	t0,1f
-	 lw	t0,SC_FPC_CSR(a0)
+	 lw	t0,(a1)
	/* Restore the 16 double precision registers */
-	ldc1	$f0,(SC_FPREGS+0)(a0)
-	ldc1	$f2,(SC_FPREGS+16)(a0)
-	ldc1	$f4,(SC_FPREGS+32)(a0)
-	ldc1	$f6,(SC_FPREGS+48)(a0)
-	ldc1	$f8,(SC_FPREGS+64)(a0)
-	ldc1	$f10,(SC_FPREGS+80)(a0)
-	ldc1	$f12,(SC_FPREGS+96)(a0)
-	ldc1	$f14,(SC_FPREGS+112)(a0)
-	ldc1	$f16,(SC_FPREGS+128)(a0)
-	ldc1	$f18,(SC_FPREGS+144)(a0)
-	ldc1	$f20,(SC_FPREGS+160)(a0)
-	ldc1	$f22,(SC_FPREGS+176)(a0)
-	ldc1	$f24,(SC_FPREGS+192)(a0)
-	ldc1	$f26,(SC_FPREGS+208)(a0)
-	ldc1	$f28,(SC_FPREGS+224)(a0)
-	ldc1	$f30,(SC_FPREGS+240)(a0)
+	ldc1	$f0,0(a0)
+	ldc1	$f2,16(a0)
+	ldc1	$f4,32(a0)
+	ldc1	$f6,48(a0)
+	ldc1	$f8,64(a0)
+	ldc1	$f10,80(a0)
+	ldc1	$f12,96(a0)
+	ldc1	$f14,112(a0)
+	ldc1	$f16,128(a0)
+	ldc1	$f18,144(a0)
+	ldc1	$f20,160(a0)
+	ldc1	$f22,176(a0)
+	ldc1	$f24,192(a0)
+	ldc1	$f26,208(a0)
+	ldc1	$f28,224(a0)
+	ldc1	$f30,240(a0)
	jr	ra
	 ctc1	t0,fcr31
 1:	jr	ra

@@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)

 #if defined(CONFIG_USE_OF)
 	/* Get any additional entropy passed in device tree */
-	{
+	if (initial_boot_params) {
 		int node, len;
 		u64 *prop;

@@ -368,6 +368,19 @@ static void __init bootmem_init(void)
 		end = PFN_DOWN(boot_mem_map.map[i].addr
 				+ boot_mem_map.map[i].size);

+#ifndef CONFIG_HIGHMEM
+		/*
+		 * Skip highmem here so we get an accurate max_low_pfn if low
+		 * memory stops short of high memory.
+		 * If the region overlaps HIGHMEM_START, end is clipped so
+		 * max_pfn excludes the highmem portion.
+		 */
+		if (start >= PFN_DOWN(HIGHMEM_START))
+			continue;
+		if (end > PFN_DOWN(HIGHMEM_START))
+			end = PFN_DOWN(HIGHMEM_START);
+#endif
+
 		if (end > max_low_pfn)
 			max_low_pfn = end;
 		if (start < min_low_pfn)

@@ -156,7 +156,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 		print_ip_sym(pc);
 		pc = unwind_stack(task, &sp, pc, &ra);
 	} while (pc);
-	printk("\n");
+	pr_cont("\n");
 }

 /*

@@ -174,22 +174,24 @@ static void show_stacktrace(struct task_struct *task,
 	printk("Stack :");
 	i = 0;
 	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
-		if (i && ((i % (64 / field)) == 0))
-			printk("\n ");
+		if (i && ((i % (64 / field)) == 0)) {
+			pr_cont("\n");
+			printk(" ");
+		}
 		if (i > 39) {
-			printk(" ...");
+			pr_cont(" ...");
 			break;
 		}

 		if (__get_user(stackdata, sp++)) {
-			printk(" (Bad stack address)");
+			pr_cont(" (Bad stack address)");
 			break;
 		}

-		printk(" %0*lx", field, stackdata);
+		pr_cont(" %0*lx", field, stackdata);
 		i++;
 	}
-	printk("\n");
+	pr_cont("\n");
 	show_backtrace(task, regs);
 }

@@ -229,18 +231,19 @@ static void show_code(unsigned int __user *pc)
 	long i;
 	unsigned short __user *pc16 = NULL;

-	printk("\nCode:");
+	printk("Code:");

 	if ((unsigned long)pc & 1)
 		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
 	for(i = -3 ; i < 6 ; i++) {
 		unsigned int insn;
 		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
-			printk(" (Bad address in epc)\n");
+			pr_cont(" (Bad address in epc)\n");
 			break;
 		}
-		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
+		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
 	}
+	pr_cont("\n");
 }

 static void __show_regs(const struct pt_regs *regs)

@@ -259,15 +262,15 @@ static void __show_regs(const struct pt_regs *regs)
 		if ((i % 4) == 0)
 			printk("$%2d :", i);
 		if (i == 0)
-			printk(" %0*lx", field, 0UL);
+			pr_cont(" %0*lx", field, 0UL);
 		else if (i == 26 || i == 27)
-			printk(" %*s", field, "");
+			pr_cont(" %*s", field, "");
 		else
-			printk(" %0*lx", field, regs->regs[i]);
+			pr_cont(" %0*lx", field, regs->regs[i]);

 		i++;
 		if ((i % 4) == 0)
-			printk("\n");
+			pr_cont("\n");
 	}

 #ifdef CONFIG_CPU_HAS_SMARTMIPS

@@ -288,46 +291,46 @@ static void __show_regs(const struct pt_regs *regs)

 	if (cpu_has_3kex) {
 		if (regs->cp0_status & ST0_KUO)
-			printk("KUo ");
+			pr_cont("KUo ");
 		if (regs->cp0_status & ST0_IEO)
-			printk("IEo ");
+			pr_cont("IEo ");
 		if (regs->cp0_status & ST0_KUP)
-			printk("KUp ");
+			pr_cont("KUp ");
 		if (regs->cp0_status & ST0_IEP)
-			printk("IEp ");
+			pr_cont("IEp ");
 		if (regs->cp0_status & ST0_KUC)
-			printk("KUc ");
+			pr_cont("KUc ");
 		if (regs->cp0_status & ST0_IEC)
-			printk("IEc ");
+			pr_cont("IEc ");
 	} else if (cpu_has_4kex) {
 		if (regs->cp0_status & ST0_KX)
-			printk("KX ");
+			pr_cont("KX ");
 		if (regs->cp0_status & ST0_SX)
-			printk("SX ");
+			pr_cont("SX ");
 		if (regs->cp0_status & ST0_UX)
-			printk("UX ");
+			pr_cont("UX ");
 		switch (regs->cp0_status & ST0_KSU) {
 		case KSU_USER:
-			printk("USER ");
+			pr_cont("USER ");
 			break;
 		case KSU_SUPERVISOR:
-			printk("SUPERVISOR ");
+			pr_cont("SUPERVISOR ");
 			break;
 		case KSU_KERNEL:
-			printk("KERNEL ");
+			pr_cont("KERNEL ");
 			break;
 		default:
-			printk("BAD_MODE ");
+			pr_cont("BAD_MODE ");
 			break;
 		}
 		if (regs->cp0_status & ST0_ERL)
-			printk("ERL ");
+			pr_cont("ERL ");
 		if (regs->cp0_status & ST0_EXL)
-			printk("EXL ");
+			pr_cont("EXL ");
 		if (regs->cp0_status & ST0_IE)
-			printk("IE ");
+			pr_cont("IE ");
 	}
-	printk("\n");
+	pr_cont("\n");

 	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
 	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

@@ -705,6 +708,32 @@ asmlinkage void do_ov(struct pt_regs *regs)
 	exception_exit(prev_state);
 }

+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits.  This is impotant as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+		     struct task_struct *tsk)
+{
+	struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
+
+	if (fcr31 & FPU_CSR_INV_X)
+		si.si_code = FPE_FLTINV;
+	else if (fcr31 & FPU_CSR_DIV_X)
+		si.si_code = FPE_FLTDIV;
+	else if (fcr31 & FPU_CSR_OVF_X)
+		si.si_code = FPE_FLTOVF;
+	else if (fcr31 & FPU_CSR_UDF_X)
+		si.si_code = FPE_FLTUND;
+	else if (fcr31 & FPU_CSR_INE_X)
+		si.si_code = FPE_FLTRES;
+	else
+		si.si_code = __SI_FAULT;
+	force_sig_info(SIGFPE, &si, tsk);
+}
+
 int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 {
 	struct siginfo si = { 0 };

@@ -715,27 +744,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 		return 0;

 	case SIGFPE:
-		si.si_addr = fault_addr;
-		si.si_signo = sig;
-		/*
-		 * Inexact can happen together with Overflow or Underflow.
-		 * Respect the mask to deliver the correct exception.
-		 */
-		fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
-			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
-		if (fcr31 & FPU_CSR_INV_X)
-			si.si_code = FPE_FLTINV;
-		else if (fcr31 & FPU_CSR_DIV_X)
-			si.si_code = FPE_FLTDIV;
-		else if (fcr31 & FPU_CSR_OVF_X)
-			si.si_code = FPE_FLTOVF;
-		else if (fcr31 & FPU_CSR_UDF_X)
-			si.si_code = FPE_FLTUND;
-		else if (fcr31 & FPU_CSR_INE_X)
-			si.si_code = FPE_FLTRES;
-		else
-			si.si_code = __SI_FAULT;
-		force_sig_info(sig, &si, current);
+		force_fcr31_sig(fcr31, fault_addr, current);
 		return 1;

 	case SIGBUS:

@@ -799,13 +808,13 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 	/* Run the emulator */
 	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 				       &fault_addr);
-	fcr31 = current->thread.fpu.fcr31;

 	/*
-	 * We can't allow the emulated instruction to leave any of
-	 * the cause bits set in $fcr31.
+	 * We can't allow the emulated instruction to leave any
+	 * enabled Cause bits set in $fcr31.
 	 */
-	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+	current->thread.fpu.fcr31 &= ~fcr31;

 	/* Restore the hardware register state */
 	own_fpu(1);

@@ -831,7 +840,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 	goto out;

 	/* Clear FCSR.Cause before enabling interrupts */
-	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
+	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
 	local_irq_enable();

 	die_if_kernel("FP exception in kernel code", regs);

@@ -853,13 +862,13 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 	/* Run the emulator */
 	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 				       &fault_addr);
-	fcr31 = current->thread.fpu.fcr31;

 	/*
-	 * We can't allow the emulated instruction to leave any of
-	 * the cause bits set in $fcr31.
+	 * We can't allow the emulated instruction to leave any
+	 * enabled Cause bits set in $fcr31.
 	 */
-	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+	current->thread.fpu.fcr31 &= ~fcr31;

 	/* Restore the hardware register state */
 	own_fpu(1);	/* Using the FPU again. */

@@ -1424,13 +1433,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)

 		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
 					       &fault_addr);
-		fcr31 = current->thread.fpu.fcr31;

 		/*
 		 * We can't allow the emulated instruction to leave
-		 * any of the cause bits set in $fcr31.
+		 * any enabled Cause bits set in $fcr31.
 		 */
-		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+		current->thread.fpu.fcr31 &= ~fcr31;

 		/* Send a signal if required. */
 		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)

|
|
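The Enable-to-Cause masking that this series factors out as mask_fcr31_x() relies on the FCSR layout: each Enable bit sits a fixed distance below its corresponding Cause bit, so shifting the Enable field up by ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E) aligns the two. A minimal user-space sketch of that computation; the FCSR_ALL_* constants are assumptions restating FPU_CSR_ALL_E and FPU_CSR_ALL_X from the MIPS asm/fpu.h (Enable at bits 7-11, Cause at bits 12-17):

#include <stdio.h>
#include <strings.h>    /* ffs() */

#define FCSR_ALL_E 0x00000f80UL /* Enable bits V Z O U I (bits 11..7), assumed layout */
#define FCSR_ALL_X 0x0003f000UL /* Cause bits E V Z O U I (bits 17..12), assumed layout */

/* Keep only the Cause bits whose matching Enable bit is set. */
static unsigned long mask_fcr31_x(unsigned long fcr31)
{
    return fcr31 & (fcr31 & FCSR_ALL_E) <<
            (ffs(FCSR_ALL_X) - ffs(FCSR_ALL_E));
}

int main(void)
{
    /* Inexact Cause raised (bit 12), but only Overflow enabled (bit 9) */
    unsigned long fcr31 = 0x00001000UL | 0x00000200UL;

    printf("masked cause: %#lx\n", mask_fcr31_x(fcr31));    /* prints 0 */
    return 0;
}

With only Overflow enabled, a raised Inexact Cause bit masks away to zero and no SIGFPE is delivered for it, which is exactly the situation the "Inexact can happen together with Overflow or Underflow" comment above is guarding against.
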
@@ -790,15 +790,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	enum emulation_result er = EMULATE_DONE;
 
-	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+	if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+		kvm_clear_c0_guest_status(cop0, ST0_ERL);
+		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+	} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
 		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
 			  kvm_read_c0_guest_epc(cop0));
 		kvm_clear_c0_guest_status(cop0, ST0_EXL);
 		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
-
-	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
-		kvm_clear_c0_guest_status(cop0, ST0_ERL);
-		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
 	} else {
 		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
 			vcpu->arch.pc);
@@ -1528,13 +1528,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
 					    struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DO_MMIO;
+	unsigned long curr_pc;
 	u32 op, rt;
 	u32 bytes;
 
 	rt = inst.i_format.rt;
 	op = inst.i_format.opcode;
 
-	vcpu->arch.pending_load_cause = cause;
+	/*
+	 * Find the resume PC now while we have safe and easy access to the
+	 * prior branch instruction, and save it for
+	 * kvm_mips_complete_mmio_load() to restore later.
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+	vcpu->arch.io_pc = vcpu->arch.pc;
+	vcpu->arch.pc = curr_pc;
+
 	vcpu->arch.io_gpr = rt;
 
 	switch (op) {
@@ -2494,9 +2506,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 		goto done;
 	}
 
-	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
-	if (er == EMULATE_FAIL)
-		return er;
+	/* Restore saved resume PC */
+	vcpu->arch.pc = vcpu->arch.io_pc;
 
 	switch (run->mmio.len) {
 	case 4:
@@ -2518,11 +2529,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 		break;
 	}
 
-	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
-		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
-			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
-			  vcpu->mmio_needed);
-
 done:
 	return er;
 }

@@ -426,7 +426,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	int cpu = smp_processor_id();
+	int i, cpu = smp_processor_id();
 	unsigned int gasid;
 
 	/*
@@ -442,6 +442,9 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
 						vcpu);
 			vcpu->arch.guest_user_asid[cpu] =
 				vcpu->arch.guest_user_mm.context.asid[cpu];
+			for_each_possible_cpu(i)
+				if (i != cpu)
+					vcpu->arch.guest_user_asid[cpu] = 0;
 			vcpu->arch.last_user_gasid = gasid;
 		}
 	}

@@ -260,13 +260,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
 						asid_version_mask(cpu)) {
-		u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
-				KVM_ENTRYHI_ASID;
-
 		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
 		vcpu->arch.guest_user_asid[cpu] =
 			vcpu->arch.guest_user_mm.context.asid[cpu];
-		vcpu->arch.last_user_gasid = gasid;
 		newasid++;
 
 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,

@@ -135,42 +135,42 @@ static void dump_tlb(int first, int last)
 		c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
 		c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
 
-		printk("va=%0*lx asid=%0*lx",
-		       vwidth, (entryhi & ~0x1fffUL),
-		       asidwidth, entryhi & asidmask);
+		pr_cont("va=%0*lx asid=%0*lx",
+			vwidth, (entryhi & ~0x1fffUL),
+			asidwidth, entryhi & asidmask);
 		if (cpu_has_guestid)
-			printk(" gid=%02lx",
-			       (guestctl1 & MIPS_GCTL1_RID)
+			pr_cont(" gid=%02lx",
+				(guestctl1 & MIPS_GCTL1_RID)
 					>> MIPS_GCTL1_RID_SHIFT);
 		/* RI/XI are in awkward places, so mask them off separately */
 		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
 		if (xpa)
 			pa |= (unsigned long long)readx_c0_entrylo0() << 30;
 		pa = (pa << 6) & PAGE_MASK;
-		printk("\n\t[");
+		pr_cont("\n\t[");
 		if (cpu_has_rixi)
-			printk("ri=%d xi=%d ",
-			       (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
-			       (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
-		printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
-		       pwidth, pa, c0,
-		       (entrylo0 & ENTRYLO_D) ? 1 : 0,
-		       (entrylo0 & ENTRYLO_V) ? 1 : 0,
-		       (entrylo0 & ENTRYLO_G) ? 1 : 0);
+			pr_cont("ri=%d xi=%d ",
+				(entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
+				(entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
+		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
+			pwidth, pa, c0,
+			(entrylo0 & ENTRYLO_D) ? 1 : 0,
+			(entrylo0 & ENTRYLO_V) ? 1 : 0,
+			(entrylo0 & ENTRYLO_G) ? 1 : 0);
 		/* RI/XI are in awkward places, so mask them off separately */
 		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
 		if (xpa)
 			pa |= (unsigned long long)readx_c0_entrylo1() << 30;
 		pa = (pa << 6) & PAGE_MASK;
 		if (cpu_has_rixi)
-			printk("ri=%d xi=%d ",
-			       (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
-			       (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
-		printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
-		       pwidth, pa, c1,
-		       (entrylo1 & ENTRYLO_D) ? 1 : 0,
-		       (entrylo1 & ENTRYLO_V) ? 1 : 0,
-		       (entrylo1 & ENTRYLO_G) ? 1 : 0);
+			pr_cont("ri=%d xi=%d ",
+				(entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
+				(entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
+		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
+			pwidth, pa, c1,
+			(entrylo1 & ENTRYLO_D) ? 1 : 0,
+			(entrylo1 & ENTRYLO_V) ? 1 : 0,
+			(entrylo1 & ENTRYLO_G) ? 1 : 0);
 	}
 	printk("\n");

@@ -53,15 +53,15 @@ static void dump_tlb(int first, int last)
 			 */
 			printk("Index: %2d ", i);
 
-			printk("va=%08lx asid=%08lx"
-			       " [pa=%06lx n=%d d=%d v=%d g=%d]",
-			       entryhi & PAGE_MASK,
-			       entryhi & asid_mask,
-			       entrylo0 & PAGE_MASK,
-			       (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
-			       (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
-			       (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
-			       (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
+			pr_cont("va=%08lx asid=%08lx"
+				" [pa=%06lx n=%d d=%d v=%d g=%d]",
+				entryhi & PAGE_MASK,
+				entryhi & asid_mask,
+				entrylo0 & PAGE_MASK,
+				(entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
+				(entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
+				(entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
+				(entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
 		}
 	}
 	printk("\n");

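For context on the printk() to pr_cont() conversions in show_regs() and the two dump_tlb() variants above: after the printk rework in this same kernel cycle, a printk() call without a KERN_<level> prefix is no longer guaranteed to continue the previous line, so partial-line fragments have to be emitted with pr_cont(), the shorthand for printk(KERN_CONT ...). A short kernel-style sketch of the pattern, illustrative only and not taken from any of the files above:

#include <linux/printk.h>

static void show_status_bits(unsigned int status)
{
    /* Start the logical line with an explicit level... */
    printk(KERN_INFO "Status: %08x ", status);
    /* ...then append fragments with pr_cont(), i.e. KERN_CONT */
    if (status & 0x2)   /* e.g. ST0_EXL */
        pr_cont("EXL ");
    if (status & 0x4)   /* e.g. ST0_ERL */
        pr_cont("ERL ");
    pr_cont("\n");      /* terminate the continued line */
}
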
@@ -324,6 +324,7 @@ static int __init nios2_time_init(struct device_node *timer)
 		ret = nios2_clocksource_init(timer);
 		break;
 	default:
+		ret = 0;
 		break;
 	}
 

@@ -23,6 +23,8 @@
  * they shouldn't be hard-coded!
  */
 
+#define __ro_after_init	__read_mostly
+
 #define L1_CACHE_BYTES 16
 #define L1_CACHE_SHIFT 4
 

@@ -368,7 +368,9 @@
 
 #define __IGNORE_select		/* newselect */
 #define __IGNORE_fadvise64	/* fadvise64_64 */
 
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
 
 #define LINUX_GATEWAY_ADDR	0x100
 

@@ -873,11 +873,11 @@ static void print_parisc_device(struct parisc_device *dev)
 
 	if (dev->num_addrs) {
 		int k;
-		printk(", additional addresses: ");
+		pr_cont(", additional addresses: ");
 		for (k = 0; k < dev->num_addrs; k++)
-			printk("0x%lx ", dev->addr[k]);
+			pr_cont("0x%lx ", dev->addr[k]);
 	}
-	printk("\n");
+	pr_cont("\n");
 }
 
 /**

@@ -100,14 +100,12 @@ set_thread_pointer:
 	.endr
 
 /* This address must remain fixed at 0x100 for glibc's syscalls to work */
-	.align 256
+	.align LINUX_GATEWAY_ADDR
 linux_gateway_entry:
 	gate	.+8, %r0	/* become privileged */
 	mtsp	%r0,%sr4	/* get kernel space into sr4 */
 	mtsp	%r0,%sr5	/* get kernel space into sr5 */
 	mtsp	%r0,%sr6	/* get kernel space into sr6 */
-	mfsp	%sr7,%r1	/* save user sr7 */
-	mtsp	%r1,%sr3	/* and store it in sr3 */
 
 #ifdef CONFIG_64BIT
 	/* for now we can *always* set the W bit on entry to the syscall
@@ -133,6 +131,14 @@ linux_gateway_entry:
 	depdi	0, 31, 32, %r21
 1:
 #endif
+
+	/* We use a rsm/ssm pair to prevent sr3 from being clobbered
+	 * by external interrupts.
+	 */
+	mfsp	%sr7,%r1	/* save user sr7 */
+	rsm	PSW_SM_I, %r0	/* disable interrupts */
+	mtsp	%r1,%sr3	/* and store it in sr3 */
+
 	mfctl	%cr30,%r1
 	xor	%r1,%r30,%r30	/* ye olde xor trick */
 	xor	%r1,%r30,%r1
@@ -147,6 +153,7 @@ linux_gateway_entry:
 	 */
 
 	mtsp	%r0,%sr7	/* get kernel space into sr7 */
+	ssm	PSW_SM_I, %r0	/* enable interrupts */
 	STREGM	%r1,FRAME_SIZE(%r30)	/* save r1 (usp) here for now */
 	mfctl	%cr30,%r1	/* get task ptr in %r1 */
 	LDREG	TI_TASK(%r1),%r1
@@ -474,11 +481,6 @@ lws_start:
 	comiclr,>>	__NR_lws_entries, %r20, %r0
 	b,n	lws_exit_nosys
 
-	/* WARNING: Trashing sr2 and sr3 */
-	mfsp	%sr7,%r1	/* get userspace into sr3 */
-	mtsp	%r1,%sr3
-	mtsp	%r0,%sr2	/* get kernel space into sr2 */
-
 	/* Load table start */
 	ldil	L%lws_table, %r1
 	ldo	R%lws_table(%r1), %r28	/* Scratch use of r28 */
@@ -627,9 +629,9 @@ cas_action:
 	stw	%r1, 4(%sr2,%r20)
 #endif
 	/* The load and store could fail */
-1:	ldw,ma	0(%sr3,%r26), %r28
+1:	ldw,ma	0(%r26), %r28
 	sub,<>	%r28, %r25, %r0
-2:	stw,ma	%r24, 0(%sr3,%r26)
+2:	stw,ma	%r24, 0(%r26)
 	/* Free lock */
 	stw,ma	%r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
@@ -706,9 +708,9 @@ lws_compare_and_swap_2:
 	nop
 
 	/* 8bit load */
-4:	ldb	0(%sr3,%r25), %r25
+4:	ldb	0(%r25), %r25
 	b	cas2_lock_start
-5:	ldb	0(%sr3,%r24), %r24
+5:	ldb	0(%r24), %r24
 	nop
 	nop
 	nop
@@ -716,9 +718,9 @@ lws_compare_and_swap_2:
 	nop
 
 	/* 16bit load */
-6:	ldh	0(%sr3,%r25), %r25
+6:	ldh	0(%r25), %r25
 	b	cas2_lock_start
-7:	ldh	0(%sr3,%r24), %r24
+7:	ldh	0(%r24), %r24
 	nop
 	nop
 	nop
@@ -726,9 +728,9 @@ lws_compare_and_swap_2:
 	nop
 
 	/* 32bit load */
-8:	ldw	0(%sr3,%r25), %r25
+8:	ldw	0(%r25), %r25
 	b	cas2_lock_start
-9:	ldw	0(%sr3,%r24), %r24
+9:	ldw	0(%r24), %r24
 	nop
 	nop
 	nop
@@ -737,14 +739,14 @@ lws_compare_and_swap_2:
 
 	/* 64bit load */
 #ifdef CONFIG_64BIT
-10:	ldd	0(%sr3,%r25), %r25
-11:	ldd	0(%sr3,%r24), %r24
+10:	ldd	0(%r25), %r25
+11:	ldd	0(%r24), %r24
 #else
 	/* Load new value into r22/r23 - high/low */
-10:	ldw	0(%sr3,%r25), %r22
-11:	ldw	4(%sr3,%r25), %r23
+10:	ldw	0(%r25), %r22
+11:	ldw	4(%r25), %r23
 	/* Load new value into fr4 for atomic store later */
-12:	flddx	0(%sr3,%r24), %fr4
+12:	flddx	0(%r24), %fr4
 #endif
 
 cas2_lock_start:
@@ -794,30 +796,30 @@ cas2_action:
 	ldo	1(%r0),%r28
 
 	/* 8bit CAS */
-13:	ldb,ma	0(%sr3,%r26), %r29
+13:	ldb,ma	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
-14:	stb,ma	%r24, 0(%sr3,%r26)
+14:	stb,ma	%r24, 0(%r26)
 	b	cas2_end
 	copy	%r0, %r28
 	nop
 	nop
 
 	/* 16bit CAS */
-15:	ldh,ma	0(%sr3,%r26), %r29
+15:	ldh,ma	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
-16:	sth,ma	%r24, 0(%sr3,%r26)
+16:	sth,ma	%r24, 0(%r26)
 	b	cas2_end
 	copy	%r0, %r28
 	nop
 	nop
 
 	/* 32bit CAS */
-17:	ldw,ma	0(%sr3,%r26), %r29
+17:	ldw,ma	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
-18:	stw,ma	%r24, 0(%sr3,%r26)
+18:	stw,ma	%r24, 0(%r26)
 	b	cas2_end
 	copy	%r0, %r28
 	nop
@@ -825,22 +827,22 @@ cas2_action:
 
 	/* 64bit CAS */
 #ifdef CONFIG_64BIT
-19:	ldd,ma	0(%sr3,%r26), %r29
+19:	ldd,ma	0(%r26), %r29
 	sub,*=	%r29, %r25, %r0
 	b,n	cas2_end
-20:	std,ma	%r24, 0(%sr3,%r26)
+20:	std,ma	%r24, 0(%r26)
 	copy	%r0, %r28
 #else
 	/* Compare first word */
-19:	ldw,ma	0(%sr3,%r26), %r29
+19:	ldw,ma	0(%r26), %r29
 	sub,=	%r29, %r22, %r0
 	b,n	cas2_end
 	/* Compare second word */
-20:	ldw,ma	4(%sr3,%r26), %r29
+20:	ldw,ma	4(%r26), %r29
 	sub,=	%r29, %r23, %r0
 	b,n	cas2_end
 	/* Perform the store */
-21:	fstdx	%fr4, 0(%sr3,%r26)
+21:	fstdx	%fr4, 0(%r26)
 	copy	%r0, %r28
 #endif
 

@@ -363,11 +363,11 @@ out:
 static int diag224_get_name_table(void)
 {
 	/* memory must be below 2GB */
-	diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+	diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
 	if (!diag224_cpu_names)
 		return -ENOMEM;
 	if (diag224(diag224_cpu_names)) {
-		kfree(diag224_cpu_names);
+		free_page((unsigned long) diag224_cpu_names);
 		return -EOPNOTSUPP;
 	}
 	EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
@@ -376,7 +376,7 @@ static int diag224_get_name_table(void)
 
 static void diag224_delete_name_table(void)
 {
-	kfree(diag224_cpu_names);
+	free_page((unsigned long) diag224_cpu_names);
 }
 
 static int diag224_idx2name(int index, char *name)

@@ -62,9 +62,11 @@ SECTIONS
 
 	. = ALIGN(PAGE_SIZE);
 	__start_ro_after_init = .;
+	__start_data_ro_after_init = .;
 	.data..ro_after_init : {
 		*(.data..ro_after_init)
 	}
+	__end_data_ro_after_init = .;
 	EXCEPTION_TABLE(16)
 	. = ALIGN(PAGE_SIZE);
 	__end_ro_after_init = .;
@@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
 	if (r < 0)
 		goto out;
 
-	diag224_buf = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+	diag224_buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
 	if (!diag224_buf || diag224(diag224_buf))
 		goto out;
 
@@ -378,7 +378,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
 	sctns->par.infpval1 |= PAR_WGHT_VLD;
 
 out:
-	kfree(diag224_buf);
+	free_page((unsigned long)diag224_buf);
 	vfree(diag204_buf);
 }
 

@@ -423,7 +423,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t dma_addr_base, dma_addr;
 	int flags = ZPCI_PTE_VALID;
 	struct scatterlist *s;
-	unsigned long pa;
+	unsigned long pa = 0;
 	int ret;
 
 	size = PAGE_ALIGN(size);

@@ -888,7 +888,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
 	struct scatter_walk src_sg_walk;
-	struct scatter_walk dst_sg_walk;
+	struct scatter_walk dst_sg_walk = {};
 	unsigned int i;
 
 	/* Assuming we are supporting rfc4106 64-bit extended */
@@ -968,7 +968,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
 	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
 	u8 authTag[16];
 	struct scatter_walk src_sg_walk;
-	struct scatter_walk dst_sg_walk;
+	struct scatter_walk dst_sg_walk = {};
 	unsigned int i;
 
 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))

@@ -36,13 +36,6 @@ static DEFINE_PER_CPU(struct pt, pt_ctx);
 
 static struct pt_pmu pt_pmu;
 
-enum cpuid_regs {
-	CR_EAX = 0,
-	CR_ECX,
-	CR_EDX,
-	CR_EBX
-};
-
 /*
  * Capabilities of Intel PT hardware, such as number of address bits or
  * supported output schemes, are cached and exported to userspace as "caps"
@@ -64,21 +57,21 @@ static struct pt_cap_desc {
 	u8		reg;
 	u32		mask;
 } pt_caps[] = {
-	PT_CAP(max_subleaf,		0, CR_EAX, 0xffffffff),
-	PT_CAP(cr3_filtering,		0, CR_EBX, BIT(0)),
-	PT_CAP(psb_cyc,			0, CR_EBX, BIT(1)),
-	PT_CAP(ip_filtering,		0, CR_EBX, BIT(2)),
-	PT_CAP(mtc,			0, CR_EBX, BIT(3)),
-	PT_CAP(ptwrite,			0, CR_EBX, BIT(4)),
-	PT_CAP(power_event_trace,	0, CR_EBX, BIT(5)),
-	PT_CAP(topa_output,		0, CR_ECX, BIT(0)),
-	PT_CAP(topa_multiple_entries,	0, CR_ECX, BIT(1)),
-	PT_CAP(single_range_output,	0, CR_ECX, BIT(2)),
-	PT_CAP(payloads_lip,		0, CR_ECX, BIT(31)),
-	PT_CAP(num_address_ranges,	1, CR_EAX, 0x3),
-	PT_CAP(mtc_periods,		1, CR_EAX, 0xffff0000),
-	PT_CAP(cycle_thresholds,	1, CR_EBX, 0xffff),
-	PT_CAP(psb_periods,		1, CR_EBX, 0xffff0000),
+	PT_CAP(max_subleaf,		0, CPUID_EAX, 0xffffffff),
+	PT_CAP(cr3_filtering,		0, CPUID_EBX, BIT(0)),
+	PT_CAP(psb_cyc,			0, CPUID_EBX, BIT(1)),
+	PT_CAP(ip_filtering,		0, CPUID_EBX, BIT(2)),
+	PT_CAP(mtc,			0, CPUID_EBX, BIT(3)),
+	PT_CAP(ptwrite,			0, CPUID_EBX, BIT(4)),
+	PT_CAP(power_event_trace,	0, CPUID_EBX, BIT(5)),
+	PT_CAP(topa_output,		0, CPUID_ECX, BIT(0)),
+	PT_CAP(topa_multiple_entries,	0, CPUID_ECX, BIT(1)),
+	PT_CAP(single_range_output,	0, CPUID_ECX, BIT(2)),
+	PT_CAP(payloads_lip,		0, CPUID_ECX, BIT(31)),
+	PT_CAP(num_address_ranges,	1, CPUID_EAX, 0x3),
+	PT_CAP(mtc_periods,		1, CPUID_EAX, 0xffff0000),
+	PT_CAP(cycle_thresholds,	1, CPUID_EBX, 0xffff),
+	PT_CAP(psb_periods,		1, CPUID_EBX, 0xffff0000),
 };
 
 static u32 pt_cap_get(enum pt_capabilities cap)
@@ -213,10 +206,10 @@ static int __init pt_pmu_hw_init(void)
 
 	for (i = 0; i < PT_CPUID_LEAVES; i++) {
 		cpuid_count(20, i,
-			    &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM],
-			    &pt_pmu.caps[CR_EBX + i*PT_CPUID_REGS_NUM],
-			    &pt_pmu.caps[CR_ECX + i*PT_CPUID_REGS_NUM],
-			    &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]);
+			    &pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
+			    &pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
+			    &pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
+			    &pt_pmu.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM]);
 	}
 
 	ret = -ENOMEM;

@@ -226,6 +226,7 @@
 #define X86_FEATURE_RDSEED	( 9*32+18) /* The RDSEED instruction */
 #define X86_FEATURE_ADX		( 9*32+19) /* The ADCX and ADOX instructions */
 #define X86_FEATURE_SMAP	( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_AVX512IFMA	( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
 #define X86_FEATURE_CLFLUSHOPT	( 9*32+23) /* CLFLUSHOPT instruction */
 #define X86_FEATURE_CLWB	( 9*32+24) /* CLWB instruction */
 #define X86_FEATURE_AVX512PF	( 9*32+26) /* AVX-512 Prefetch */
@@ -279,6 +280,7 @@
 #define X86_FEATURE_AVIC	(15*32+13) /* Virtual Interrupt Controller */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
+#define X86_FEATURE_AVX512VBMI	(16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
 #define X86_FEATURE_PKU		(16*32+ 3) /* Protection Keys for Userspace */
 #define X86_FEATURE_OSPKE	(16*32+ 4) /* OS Protection Keys Enable */
 

@@ -948,7 +948,6 @@ struct kvm_x86_ops {
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
 	bool (*invpcid_supported)(void);
-	void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);
 
 	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
@@ -958,8 +957,6 @@ struct kvm_x86_ops {
 
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
-	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
-
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 
 	int (*check_intercept)(struct kvm_vcpu *vcpu,

@@ -137,6 +137,17 @@ struct cpuinfo_x86 {
 	u32		microcode;
 };
 
+struct cpuid_regs {
+	u32 eax, ebx, ecx, edx;
+};
+
+enum cpuid_regs_idx {
+	CPUID_EAX = 0,
+	CPUID_EBX,
+	CPUID_ECX,
+	CPUID_EDX,
+};
+
 #define X86_VENDOR_INTEL	0
 #define X86_VENDOR_CYRIX	1
 #define X86_VENDOR_AMD		2
@@ -178,6 +189,9 @@ extern void identify_secondary_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);
 void print_cpu_msr(struct cpuinfo_x86 *);
 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
+extern u32 get_scattered_cpuid_leaf(unsigned int level,
+				    unsigned int sub_leaf,
+				    enum cpuid_regs_idx reg);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
 

@@ -1042,8 +1042,11 @@ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
 
 	if (apm_info.get_power_status_broken)
 		return APM_32_UNSUPPORTED;
-	if (apm_bios_call(&call))
+	if (apm_bios_call(&call)) {
+		if (!call.err)
+			return APM_NO_ERROR;
 		return call.err;
+	}
 	*status = call.ebx;
 	*bat = call.ecx;
 	if (apm_info.get_power_status_swabinminutes) {

@@ -17,11 +17,17 @@ struct cpuid_bit {
 	u32 sub_leaf;
 };
 
-enum cpuid_regs {
-	CR_EAX = 0,
-	CR_ECX,
-	CR_EDX,
-	CR_EBX
-};
+/* Please keep the leaf sorted by cpuid_bit.level for faster search. */
+static const struct cpuid_bit cpuid_bits[] = {
+	{ X86_FEATURE_APERFMPERF,	CPUID_ECX,  0, 0x00000006, 0 },
+	{ X86_FEATURE_EPB,		CPUID_ECX,  3, 0x00000006, 0 },
+	{ X86_FEATURE_INTEL_PT,		CPUID_EBX, 25, 0x00000007, 0 },
+	{ X86_FEATURE_AVX512_4VNNIW,	CPUID_EDX,  2, 0x00000007, 0 },
+	{ X86_FEATURE_AVX512_4FMAPS,	CPUID_EDX,  3, 0x00000007, 0 },
+	{ X86_FEATURE_HW_PSTATE,	CPUID_EDX,  7, 0x80000007, 0 },
+	{ X86_FEATURE_CPB,		CPUID_EDX,  9, 0x80000007, 0 },
+	{ X86_FEATURE_PROC_FEEDBACK,	CPUID_EDX, 11, 0x80000007, 0 },
+	{ 0, 0, 0, 0, 0 }
+};
 
 void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
@@ -30,18 +36,6 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 	u32 regs[4];
 	const struct cpuid_bit *cb;
 
-	static const struct cpuid_bit cpuid_bits[] = {
-		{ X86_FEATURE_INTEL_PT,		CR_EBX, 25, 0x00000007, 0 },
-		{ X86_FEATURE_AVX512_4VNNIW,	CR_EDX,  2, 0x00000007, 0 },
-		{ X86_FEATURE_AVX512_4FMAPS,	CR_EDX,  3, 0x00000007, 0 },
-		{ X86_FEATURE_APERFMPERF,	CR_ECX,  0, 0x00000006, 0 },
-		{ X86_FEATURE_EPB,		CR_ECX,  3, 0x00000006, 0 },
-		{ X86_FEATURE_HW_PSTATE,	CR_EDX,  7, 0x80000007, 0 },
-		{ X86_FEATURE_CPB,		CR_EDX,  9, 0x80000007, 0 },
-		{ X86_FEATURE_PROC_FEEDBACK,	CR_EDX, 11, 0x80000007, 0 },
-		{ 0, 0, 0, 0, 0 }
-	};
-
 	for (cb = cpuid_bits; cb->feature; cb++) {
 
 		/* Verify that the level is valid */
@@ -50,10 +44,35 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 		    max_level > (cb->level | 0xffff))
 			continue;
 
-		cpuid_count(cb->level, cb->sub_leaf, &regs[CR_EAX],
-			    &regs[CR_EBX], &regs[CR_ECX], &regs[CR_EDX]);
+		cpuid_count(cb->level, cb->sub_leaf, &regs[CPUID_EAX],
+			    &regs[CPUID_EBX], &regs[CPUID_ECX],
+			    &regs[CPUID_EDX]);
 
 		if (regs[cb->reg] & (1 << cb->bit))
 			set_cpu_cap(c, cb->feature);
 	}
 }
+
+u32 get_scattered_cpuid_leaf(unsigned int level, unsigned int sub_leaf,
+			     enum cpuid_regs_idx reg)
+{
+	const struct cpuid_bit *cb;
+	u32 cpuid_val = 0;
+
+	for (cb = cpuid_bits; cb->feature; cb++) {
+
+		if (level > cb->level)
+			continue;
+
+		if (level < cb->level)
+			break;
+
+		if (reg == cb->reg && sub_leaf == cb->sub_leaf) {
+			if (cpu_has(&boot_cpu_data, cb->feature))
+				cpuid_val |= BIT(cb->bit);
+		}
+	}
+
+	return cpuid_val;
+}
+EXPORT_SYMBOL_GPL(get_scattered_cpuid_leaf);

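The scattered.c rework above replaces the file-local CR_* enum with the shared cpuid_regs_idx from processor.h and hoists cpuid_bits[] to file scope so that the new get_scattered_cpuid_leaf() can walk the same table. A user-space sketch of the same kind of sub-leaf register probe, using GCC's <cpuid.h>; the leaf and bit chosen here (Intel PT in CPUID.(EAX=7,ECX=0):EBX[25]) mirror one of the table entries:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* CPUID leaf 7, sub-leaf 0: structured extended feature flags */
    if (!__get_cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx))
        return 1;   /* leaf not supported on this CPU */

    /* EBX bit 25 is Intel PT, as in the cpuid_bits[] entry above */
    printf("Intel PT: %s\n", (ebx & (1u << 25)) ? "yes" : "no");
    return 0;
}
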
@@ -46,10 +46,6 @@
 
 static struct class *cpuid_class;
 
-struct cpuid_regs {
-	u32 eax, ebx, ecx, edx;
-};
-
 static void cpuid_smp_cpuid(void *cmd_block)
 {
 	struct cpuid_regs *cmd = (struct cpuid_regs *)cmd_block;

@@ -65,6 +65,7 @@ void fpu__xstate_clear_all_cpu_caps(void)
 	setup_clear_cpu_cap(X86_FEATURE_AVX);
 	setup_clear_cpu_cap(X86_FEATURE_AVX2);
 	setup_clear_cpu_cap(X86_FEATURE_AVX512F);
+	setup_clear_cpu_cap(X86_FEATURE_AVX512IFMA);
 	setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
 	setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
 	setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
@@ -73,6 +74,7 @@ void fpu__xstate_clear_all_cpu_caps(void)
 	setup_clear_cpu_cap(X86_FEATURE_AVX512VL);
 	setup_clear_cpu_cap(X86_FEATURE_MPX);
 	setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
+	setup_clear_cpu_cap(X86_FEATURE_AVX512VBMI);
 	setup_clear_cpu_cap(X86_FEATURE_PKU);
 	setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW);
 	setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS);

@@ -5045,7 +5045,7 @@ done_prefixes:
 	/* Decode and fetch the destination operand: register or memory. */
 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
 
-	if (ctxt->rip_relative)
+	if (ctxt->rip_relative && likely(ctxt->memopp))
 		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
 				ctxt->memopp->addr.mem.ea + ctxt->_eip);
 

@@ -1138,21 +1138,6 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	svm->vmcb->control.tsc_offset += adjustment;
-	if (is_guest_mode(vcpu))
-		svm->nested.hsave->control.tsc_offset += adjustment;
-	else
-		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-					   svm->vmcb->control.tsc_offset - adjustment,
-					   svm->vmcb->control.tsc_offset);
-
-	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
-}
-
 static void avic_init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb *vmcb = svm->vmcb;
@@ -3449,12 +3434,6 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 	return 0;
 }
 
-static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
-{
-	struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
-	return vmcb->control.tsc_offset + host_tsc;
-}
-
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -5422,8 +5401,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
 
 	.write_tsc_offset = svm_write_tsc_offset,
-	.adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
-	.read_l1_tsc = svm_read_l1_tsc,
 
 	.set_tdp_cr3 = set_tdp_cr3,
 

@@ -187,6 +187,7 @@ struct vmcs {
  */
 struct loaded_vmcs {
 	struct vmcs *vmcs;
+	struct vmcs *shadow_vmcs;
 	int cpu;
 	int launched;
 	struct list_head loaded_vmcss_on_cpu_link;
@@ -411,7 +412,6 @@ struct nested_vmx {
 	 * memory during VMXOFF, VMCLEAR, VMPTRLD.
 	 */
 	struct vmcs12 *cached_vmcs12;
-	struct vmcs *current_shadow_vmcs;
 	/*
 	 * Indicates if the shadow vmcs must be updated with the
 	 * data hold by vmcs12
@@ -421,7 +421,6 @@ struct nested_vmx {
 	/* vmcs02_list cache of VMCSs recently used to run L2 guests */
 	struct list_head vmcs02_pool;
 	int vmcs02_num;
-	u64 vmcs01_tsc_offset;
 	bool change_vmcs01_virtual_x2apic_mode;
 	/* L2 must run next, and mustn't decide to exit to L1. */
 	bool nested_run_pending;
@@ -1419,6 +1418,8 @@ static void vmcs_clear(struct vmcs *vmcs)
 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
 {
 	vmcs_clear(loaded_vmcs->vmcs);
+	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
+		vmcs_clear(loaded_vmcs->shadow_vmcs);
 	loaded_vmcs->cpu = -1;
 	loaded_vmcs->launched = 0;
 }
@@ -2604,20 +2605,6 @@ static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
 	return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
 }
 
-/*
- * Like guest_read_tsc, but always returns L1's notion of the timestamp
- * counter, even if a nested guest (L2) is currently running.
- */
-static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
-{
-	u64 tsc_offset;
-
-	tsc_offset = is_guest_mode(vcpu) ?
-		to_vmx(vcpu)->nested.vmcs01_tsc_offset :
-		vmcs_read64(TSC_OFFSET);
-	return host_tsc + tsc_offset;
-}
-
 /*
  * writes 'offset' into guest's timestamp counter offset register
  */
@@ -2631,7 +2618,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 		 * to the newly set TSC to get L2's TSC.
 		 */
 		struct vmcs12 *vmcs12;
-		to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
 		/* recalculate vmcs02.TSC_OFFSET: */
 		vmcs12 = get_vmcs12(vcpu);
 		vmcs_write64(TSC_OFFSET, offset +
@@ -2644,19 +2630,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	}
 }
 
-static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
-{
-	u64 offset = vmcs_read64(TSC_OFFSET);
-
-	vmcs_write64(TSC_OFFSET, offset + adjustment);
-	if (is_guest_mode(vcpu)) {
-		/* Even when running L2, the adjustment needs to apply to L1 */
-		to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
-	} else
-		trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
-					   offset + adjustment);
-}
-
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
@@ -3562,6 +3535,7 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
 	loaded_vmcs_clear(loaded_vmcs);
 	free_vmcs(loaded_vmcs->vmcs);
 	loaded_vmcs->vmcs = NULL;
+	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
 }
 
 static void free_kvm_area(void)
@@ -6696,6 +6670,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
 	if (!item)
 		return NULL;
 	item->vmcs02.vmcs = alloc_vmcs();
+	item->vmcs02.shadow_vmcs = NULL;
 	if (!item->vmcs02.vmcs) {
 		kfree(item);
 		return NULL;
@@ -7072,7 +7047,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		shadow_vmcs->revision_id |= (1u << 31);
 		/* init shadow vmcs */
 		vmcs_clear(shadow_vmcs);
-		vmx->nested.current_shadow_vmcs = shadow_vmcs;
+		vmx->vmcs01.shadow_vmcs = shadow_vmcs;
 	}
 
 	INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
@@ -7174,8 +7149,11 @@ static void free_nested(struct vcpu_vmx *vmx)
 		free_page((unsigned long)vmx->nested.msr_bitmap);
 		vmx->nested.msr_bitmap = NULL;
 	}
-	if (enable_shadow_vmcs)
-		free_vmcs(vmx->nested.current_shadow_vmcs);
+	if (enable_shadow_vmcs) {
+		vmcs_clear(vmx->vmcs01.shadow_vmcs);
+		free_vmcs(vmx->vmcs01.shadow_vmcs);
+		vmx->vmcs01.shadow_vmcs = NULL;
+	}
 	kfree(vmx->nested.cached_vmcs12);
 	/* Unpin physical memory we referred to in current vmcs02 */
 	if (vmx->nested.apic_access_page) {
@@ -7352,7 +7330,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 	int i;
 	unsigned long field;
 	u64 field_value;
-	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
 	const unsigned long *fields = shadow_read_write_fields;
 	const int num_fields = max_shadow_read_write_fields;
 
@@ -7401,7 +7379,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
 	int i, q;
 	unsigned long field;
 	u64 field_value = 0;
-	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
 
 	vmcs_load(shadow_vmcs);
 
@@ -7591,7 +7569,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
 				      SECONDARY_EXEC_SHADOW_VMCS);
 			vmcs_write64(VMCS_LINK_POINTER,
-				     __pa(vmx->nested.current_shadow_vmcs));
+				     __pa(vmx->vmcs01.shadow_vmcs));
 			vmx->nested.sync_shadow_vmcs = true;
 		}
 	}
@@ -7659,7 +7637,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 
 	types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
 
-	if (!(types & (1UL << type))) {
+	if (type >= 32 || !(types & (1 << type))) {
 		nested_vmx_failValid(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 		skip_emulated_instruction(vcpu);
@@ -7722,7 +7700,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 
 	types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7;
 
-	if (!(types & (1UL << type))) {
+	if (type >= 32 || !(types & (1 << type))) {
 		nested_vmx_failValid(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 		skip_emulated_instruction(vcpu);
@@ -9156,6 +9134,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	vmx->loaded_vmcs = &vmx->vmcs01;
 	vmx->loaded_vmcs->vmcs = alloc_vmcs();
+	vmx->loaded_vmcs->shadow_vmcs = NULL;
 	if (!vmx->loaded_vmcs->vmcs)
 		goto free_msrs;
 	if (!vmm_exclusive)
@@ -10061,9 +10040,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
 	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
 		vmcs_write64(TSC_OFFSET,
-			vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
+			vcpu->arch.tsc_offset + vmcs12->tsc_offset);
 	else
-		vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+		vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
 	if (kvm_has_tsc_control)
 		decache_tsc_multiplier(vmx);
 
@@ -10293,8 +10272,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 
 	enter_guest_mode(vcpu);
 
-	vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
-
 	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
 		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
 
@@ -10818,7 +10795,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	load_vmcs12_host_state(vcpu, vmcs12);
 
 	/* Update any VMCS fields that might have changed while L2 ran */
-	vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
 	if (vmx->hv_deadline_tsc == -1)
 		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
 				PIN_BASED_VMX_PREEMPTION_TIMER);
@@ -11339,8 +11316,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
 	.write_tsc_offset = vmx_write_tsc_offset,
-	.adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
-	.read_l1_tsc = vmx_read_l1_tsc,
 
 	.set_tdp_cr3 = vmx_set_cr3,
 

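The vmx and svm hunks above retire the per-vendor read_l1_tsc() and adjust_tsc_offset_guest() hooks in favour of a tsc_offset value cached in vcpu->arch (see the x86.c hunks that follow). The arithmetic being centralised is simple: the guest reads host TSC plus the current offset, and while L2 runs the effective offset is L1's offset plus the offset L1 programmed in vmcs12. A minimal sketch of that relationship, plain C and purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Guest-visible TSC = host TSC + active offset; while L2 runs, the
 * active offset is L1's offset plus the one L1 set up for L2.
 */
static uint64_t guest_tsc(uint64_t host_tsc, int64_t l1_offset,
                          int64_t vmcs12_offset, int l2_running)
{
    int64_t offset = l1_offset + (l2_running ? vmcs12_offset : 0);

    return host_tsc + (uint64_t)offset;
}

int main(void)
{
    uint64_t host = 1000000;

    printf("L1 view: %llu\n",
           (unsigned long long)guest_tsc(host, -500, 0, 0));
    printf("L2 view: %llu\n",
           (unsigned long long)guest_tsc(host, -500, -200, 1));
    return 0;
}
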
@@ -1409,7 +1409,7 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 
 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
-	return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc));
+	return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
 }
 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 
@@ -1547,7 +1547,7 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
 					   s64 adjustment)
 {
-	kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+	kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
 }
 
 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -1555,7 +1555,7 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
 	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
 		WARN_ON(adjustment < 0);
 	adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
-	kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+	adjust_tsc_offset_guest(vcpu, adjustment);
 }
 
 #ifdef CONFIG_X86_64
@@ -2262,7 +2262,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		/* Drop writes to this legacy MSR -- see rdmsr
 		 * counterpart for further detail.
 		 */
-		vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
+		vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
 		break;
 	case MSR_AMD64_OSVW_ID_LENGTH:
 		if (!guest_cpuid_has_osvw(vcpu))
@@ -2280,11 +2280,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (kvm_pmu_is_valid_msr(vcpu, msr))
 			return kvm_pmu_set_msr(vcpu, msr_info);
 		if (!ignore_msrs) {
-			vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
+			vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
 				    msr, data);
 			return 1;
 		} else {
-			vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
+			vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
 				    msr, data);
 			break;
 		}
@@ -7410,10 +7410,12 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
+	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+
 	kvmclock_reset(vcpu);
 
-	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
 	kvm_x86_ops->vcpu_free(vcpu);
+	free_cpumask_var(wbinvd_dirty_mask);
 }
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,

@@ -122,7 +122,7 @@ static int acpi_apd_create_device(struct acpi_device *adev,
 	int ret;
 
 	if (!dev_desc) {
-		pdev = acpi_create_platform_device(adev);
+		pdev = acpi_create_platform_device(adev, NULL);
 		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
 	}
 
@@ -139,14 +139,8 @@ static int acpi_apd_create_device(struct acpi_device *adev,
 		goto err_out;
 	}
 
-	if (dev_desc->properties) {
-		ret = device_add_properties(&adev->dev, dev_desc->properties);
-		if (ret)
-			goto err_out;
-	}
-
 	adev->driver_data = pdata;
-	pdev = acpi_create_platform_device(adev);
+	pdev = acpi_create_platform_device(adev, dev_desc->properties);
 	if (!IS_ERR_OR_NULL(pdev))
 		return 1;
 

@@ -395,7 +395,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 
 	dev_desc = (const struct lpss_device_desc *)id->driver_data;
 	if (!dev_desc) {
-		pdev = acpi_create_platform_device(adev);
+		pdev = acpi_create_platform_device(adev, NULL);
 		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
 	}
 	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
@@ -451,14 +451,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 		goto err_out;
 	}
 
-	if (dev_desc->properties) {
-		ret = device_add_properties(&adev->dev, dev_desc->properties);
-		if (ret)
-			goto err_out;
-	}
-
 	adev->driver_data = pdata;
-	pdev = acpi_create_platform_device(adev);
+	pdev = acpi_create_platform_device(adev, dev_desc->properties);
 	if (!IS_ERR_OR_NULL(pdev)) {
 		return 1;
 	}

@@ -50,6 +50,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
 /**
  * acpi_create_platform_device - Create platform device for ACPI device node
  * @adev: ACPI device node to create a platform device for.
+ * @properties: Optional collection of build-in properties.
  *
  * Check if the given @adev can be represented as a platform device and, if
 * that's the case, create and register a platform device, populate its common
@@ -57,7 +58,8 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
  *
  * Name of the platform device will be the same as @adev's.
  */
-struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
+struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
+					struct property_entry *properties)
 {
 	struct platform_device *pdev = NULL;
 	struct platform_device_info pdevinfo;
@@ -106,6 +108,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
 	pdevinfo.res = resources;
 	pdevinfo.num_res = count;
 	pdevinfo.fwnode = acpi_fwnode_handle(adev);
+	pdevinfo.properties = properties;
 
 	if (acpi_dma_supported(adev))
 		pdevinfo.dma_mask = DMA_BIT_MASK(32);

@@ -34,11 +34,11 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
 			const struct acpi_device_id *id)
 {
 	if (IS_ENABLED(CONFIG_INT340X_THERMAL))
-		acpi_create_platform_device(adev);
+		acpi_create_platform_device(adev, NULL);
 	/* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
 	else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
 		 id->driver_data == INT3401_DEVICE)
-		acpi_create_platform_device(adev);
+		acpi_create_platform_device(adev, NULL);
 	return 1;
 }
 

@@ -1734,7 +1734,7 @@ static void acpi_default_enumeration(struct acpi_device *device)
 				     &is_spi_i2c_slave);
 	acpi_dev_free_resource_list(&resource_list);
 	if (!is_spi_i2c_slave) {
-		acpi_create_platform_device(device);
+		acpi_create_platform_device(device, NULL);
 		acpi_device_set_enumerated(device);
 	} else {
 		blocking_notifier_call_chain(&acpi_reconfig_chain,

@@ -324,7 +324,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
 {
 	int ret = -EPROBE_DEFER;
 	int local_trigger_count = atomic_read(&deferred_trigger_count);
-	bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE);
+	bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
+			   !drv->suppress_bind_attrs;
 
 	if (defer_all_probes) {
 		/*
@@ -383,7 +384,7 @@ re_probe:
 	if (test_remove) {
 		test_remove = false;
 
-		if (dev->bus && dev->bus->remove)
+		if (dev->bus->remove)
 			dev->bus->remove(dev);
 		else if (drv->remove)
 			drv->remove(dev);

@@ -1027,6 +1027,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
 	TRACE_DEVICE(dev);
 	TRACE_SUSPEND(0);
 
+	dpm_wait_for_children(dev, async);
+
 	if (async_error)
 		goto Complete;
 
@@ -1038,8 +1040,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
 	if (dev->power.syscore || dev->power.direct_complete)
 		goto Complete;
 
-	dpm_wait_for_children(dev, async);
-
 	if (dev->pm_domain) {
 		info = "noirq power domain ";
 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -1174,6 +1174,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
 
 	__pm_runtime_disable(dev, false);
 
+	dpm_wait_for_children(dev, async);
+
 	if (async_error)
 		goto Complete;
 
@@ -1185,8 +1187,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
 	if (dev->power.syscore || dev->power.direct_complete)
 		goto Complete;
 
-	dpm_wait_for_children(dev, async);
-
 	if (dev->pm_domain) {
 		info = "late power domain ";
 		callback = pm_late_early_op(&dev->pm_domain->ops, state);

@@ -853,45 +853,6 @@ rqbiocnt(struct request *r)
 	return n;
 }
 
-/* This can be removed if we are certain that no users of the block
- * layer will ever use zero-count pages in bios.  Otherwise we have to
- * protect against the put_page sometimes done by the network layer.
- *
- * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
- * discussion.
- *
- * We cannot use get_page in the workaround, because it insists on a
- * positive page count as a precondition.  So we use _refcount directly.
- */
-static void
-bio_pageinc(struct bio *bio)
-{
-	struct bio_vec bv;
-	struct page *page;
-	struct bvec_iter iter;
-
-	bio_for_each_segment(bv, bio, iter) {
-		/* Non-zero page count for non-head members of
-		 * compound pages is no longer allowed by the kernel.
-		 */
-		page = compound_head(bv.bv_page);
-		page_ref_inc(page);
-	}
-}
-
-static void
-bio_pagedec(struct bio *bio)
-{
-	struct page *page;
-	struct bio_vec bv;
-	struct bvec_iter iter;
-
-	bio_for_each_segment(bv, bio, iter) {
-		page = compound_head(bv.bv_page);
-		page_ref_dec(page);
-	}
-}
-
 static void
 bufinit(struct buf *buf, struct request *rq, struct bio *bio)
 {
@@ -899,7 +860,6 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
 	buf->rq = rq;
 	buf->bio = bio;
 	buf->iter = bio->bi_iter;
-	bio_pageinc(bio);
 }
 
 static struct buf *
@@ -1127,7 +1087,6 @@ aoe_end_buf(struct aoedev *d, struct buf *buf)
 	if (buf == d->ip.buf)
 		d->ip.buf = NULL;
 	rq = buf->rq;
-	bio_pagedec(buf->bio);
 	mempool_free(buf, d->bufpool);
 	n = (unsigned long) rq->special;
 	rq->special = (void *) --n;

@@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
 		drbd_update_congested(connection);
 	}
 	do {
-		rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
+		rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
 		if (rv == -EAGAIN) {
 			if (we_should_drop_the_connection(connection, sock))
 				break;

@@ -599,7 +599,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 			return -EINVAL;
 
 		sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);
-		if (!sreq)
+		if (IS_ERR(sreq))
 			return -ENOMEM;
 
 		mutex_unlock(&nbd->tx_lock);

@@ -376,7 +376,7 @@ static void virtblk_config_changed(struct virtio_device *vdev)
 
 static int init_vq(struct virtio_blk *vblk)
 {
-	int err = 0;
+	int err;
 	int i;
 	vq_callback_t **callbacks;
 	const char **names;
@@ -390,13 +390,13 @@ static int init_vq(struct virtio_blk *vblk)
 	if (err)
 		num_vqs = 1;
 
-	vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
+	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
 	if (!vblk->vqs)
 		return -ENOMEM;
 
-	names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
-	callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
-	vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
+	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
+	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
+	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
 	if (!names || !callbacks || !vqs) {
 		err = -ENOMEM;
 		goto out;

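The kmalloc(sizeof(x) * n) to kmalloc_array(n, sizeof(x)) conversions above are not cosmetic: kmalloc_array() refuses allocations whose element count times element size would overflow, instead of silently wrapping and under-allocating. A user-space sketch of the check it performs, assuming the usual calloc-style bail-out:

#include <stdint.h>
#include <stdlib.h>

/* Allocate n elements of 'size' bytes, refusing on multiplication
 * overflow, the same idea as the kernel's kmalloc_array().
 */
static void *alloc_array(size_t n, size_t size)
{
    if (size != 0 && n > SIZE_MAX / size)
        return NULL;    /* n * size would wrap around */
    return malloc(n * size);
}

With a wrapping multiply, a huge element count could make the plain kmalloc() succeed with a too-small buffer; the _array form turns that into a clean allocation failure instead.
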
@@ -748,10 +748,7 @@ static int pp_release(struct inode *inode, struct file *file)
 	}
 
 	if (pp->pdev) {
-		const char *name = pp->pdev->name;
-
 		parport_unregister_device(pp->pdev);
-		kfree(name);
 		pp->pdev = NULL;
 		pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
 	}

@@ -1539,19 +1539,29 @@ static void remove_port_data(struct port *port)
 	spin_lock_irq(&port->inbuf_lock);
 	/* Remove unused data this port might have received. */
 	discard_port_data(port);
+	spin_unlock_irq(&port->inbuf_lock);
 
 	/* Remove buffers we queued up for the Host to send us data in. */
-	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-		free_buf(buf, true);
-	spin_unlock_irq(&port->inbuf_lock);
+	do {
+		spin_lock_irq(&port->inbuf_lock);
+		buf = virtqueue_detach_unused_buf(port->in_vq);
+		spin_unlock_irq(&port->inbuf_lock);
+		if (buf)
+			free_buf(buf, true);
+	} while (buf);
 
 	spin_lock_irq(&port->outvq_lock);
 	reclaim_consumed_buffers(port);
+	spin_unlock_irq(&port->outvq_lock);
 
 	/* Free pending buffers from the out-queue. */
-	while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
-		free_buf(buf, true);
-	spin_unlock_irq(&port->outvq_lock);
+	do {
+		spin_lock_irq(&port->outvq_lock);
+		buf = virtqueue_detach_unused_buf(port->out_vq);
+		spin_unlock_irq(&port->outvq_lock);
+		if (buf)
+			free_buf(buf, true);
+	} while (buf);
 }
 
 /*

@@ -700,6 +700,7 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
 					     struct mux_hwclock *hwc,
 					     const struct clk_ops *ops,
 					     unsigned long min_rate,
+					     unsigned long max_rate,
 					     unsigned long pct80_rate,
 					     const char *fmt, int idx)
 {
@@ -728,6 +729,8 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
 			continue;
 		if (rate < min_rate)
 			continue;
+		if (rate > max_rate)
+			continue;
 
 		parent_names[j] = div->name;
 		hwc->parent_to_clksel[j] = i;
@@ -759,7 +762,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
 	struct mux_hwclock *hwc;
 	const struct clockgen_pll_div *div;
 	unsigned long plat_rate, min_rate;
-	u64 pct80_rate;
+	u64 max_rate, pct80_rate;
 	u32 clksel;
 
 	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
@@ -787,8 +790,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
 		return NULL;
 	}
 
-	pct80_rate = clk_get_rate(div->clk);
-	pct80_rate *= 8;
+	max_rate = clk_get_rate(div->clk);
+	pct80_rate = max_rate * 8;
 	do_div(pct80_rate, 10);
 
 	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);
@@ -798,7 +801,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
 	else
 		min_rate = plat_rate / 2;
 
-	return create_mux_common(cg, hwc, &cmux_ops, min_rate,
+	return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
 				 pct80_rate, "cg-cmux%d", idx);
 }
 
@@ -813,7 +816,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
 	hwc->reg = cg->regs + 0x20 * idx + 0x10;
 	hwc->info = cg->info.hwaccel[idx];
 
-	return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
+	return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
 				 "cg-hwaccel%d", idx);
 }
 

@@ -463,22 +463,20 @@ static int xgene_clk_enable(struct clk_hw *hw)
 	struct xgene_clk *pclk = to_xgene_clk(hw);
 	unsigned long flags = 0;
 	u32 data;
-	phys_addr_t reg;
 
 	if (pclk->lock)
 		spin_lock_irqsave(pclk->lock, flags);
 
 	if (pclk->param.csr_reg != NULL) {
 		pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
-		reg = __pa(pclk->param.csr_reg);
 		/* First enable the clock */
 		data = xgene_clk_read(pclk->param.csr_reg +
 					pclk->param.reg_clk_offset);
 		data |= pclk->param.reg_clk_mask;
 		xgene_clk_write(data, pclk->param.csr_reg +
 					pclk->param.reg_clk_offset);
-		pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n",
-			 clk_hw_get_name(hw), &reg,
+		pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
+			 clk_hw_get_name(hw),
 			 pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
 			 data);
 
@@ -488,8 +486,8 @@ static int xgene_clk_enable(struct clk_hw *hw)
 		data &= ~pclk->param.reg_csr_mask;
 		xgene_clk_write(data, pclk->param.csr_reg +
 					pclk->param.reg_csr_offset);
-		pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n",
-			 clk_hw_get_name(hw), &reg,
+		pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
+			 clk_hw_get_name(hw),
 			 pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
 			 data);
 	}

@@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
 	temp64 *= mfn;
 	do_div(temp64, mfd);
 
-	return (parent_rate * div) + (u32)temp64;
+	return parent_rate * div + (unsigned long)temp64;
 }
 
 static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -247,7 +247,11 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
 	do_div(temp64, parent_rate);
 	mfn = temp64;
 
-	return parent_rate * div + parent_rate * mfn / mfd;
+	temp64 = (u64)parent_rate;
+	temp64 *= mfn;
+	do_div(temp64, mfd);
+
+	return parent_rate * div + (unsigned long)temp64;
 }
 
 static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,

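The clk-pllv3 hunks above make round_rate use the same 64-bit intermediate as recalc_rate, because parent_rate * mfn can overflow unsigned long on 32-bit i.MX. A user-space sketch of the corrected computation, with plain 64-bit division standing in for the kernel's do_div():

#include <stdint.h>
#include <stdio.h>

/* rate = parent_rate * div + parent_rate * mfn / mfd, with the
 * fractional term computed in 64 bits to avoid 32-bit overflow.
 */
static unsigned long av_pll_rate(unsigned long parent_rate, uint32_t div,
                                 uint32_t mfn, uint32_t mfd)
{
    uint64_t temp64 = (uint64_t)parent_rate * mfn;

    temp64 /= mfd;  /* stands in for do_div(temp64, mfd) */
    return parent_rate * div + (unsigned long)temp64;
}

int main(void)
{
    /* 24 MHz parent: 24000000 * 1000000 already exceeds 2^32 */
    printf("%lu\n", av_pll_rate(24000000, 28, 1000000, 3000000));
    return 0;
}
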
@@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np)
 	}
 
 	pxa_unit->apmu_base = of_iomap(np, 1);
-	if (!pxa_unit->mpmu_base) {
+	if (!pxa_unit->apmu_base) {
 		pr_err("failed to map apmu registers\n");
 		return;
 	}