Merge remote-tracking branch 'torvalds/master' into perf/urgent

To update kernel headers and check if some need syncing.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

Commit: 689bb69093
@@ -23,6 +23,7 @@ properties:
- enum:
- ingenic,jz4775-intc
- ingenic,jz4770-intc
- ingenic,jz4760b-intc
- const: ingenic,jz4760-intc
- items:
- const: ingenic,x1000-intc
@@ -613,6 +613,27 @@ Some of these date from the very introduction of KMS in 2008 ...

Level: Intermediate

Remove automatic page mapping from dma-buf importing
----------------------------------------------------

When importing dma-bufs, the dma-buf and PRIME frameworks automatically map
imported pages into the importer's DMA area. drm_gem_prime_fd_to_handle() and
drm_gem_prime_handle_to_fd() require that importers call dma_buf_attach()
even if they never do actual device DMA, but only CPU access through
dma_buf_vmap(). This is a problem for USB devices, which do not support DMA
operations.

To fix the issue, automatic page mappings should be removed from the
buffer-sharing code. Fixing this is a bit more involved, since the import/export
cache is also tied to &drm_gem_object.import_attach. Meanwhile we paper over
this problem for USB devices by fishing out the USB host controller device, as
long as that supports DMA. Otherwise importing can still needlessly fail.

Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter

Level: Advanced
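For reference, a minimal, hypothetical sketch of the importer pattern this TODO wants to eliminate: a device attachment made only to satisfy the current PRIME/dma-buf rules, even though the importer just needs a CPU mapping. It assumes the v5.12-era interfaces; in particular the struct dma_buf_map based dma_buf_vmap()/dma_buf_vunmap() signatures have changed in later kernels.

#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>
#include <linux/err.h>

/* Hypothetical helper, not taken from this merge. */
static int example_cpu_only_import(struct dma_buf *dmabuf, struct device *dev)
{
        struct dma_buf_attachment *attach;
        struct dma_buf_map map;
        int ret;

        /* Required by today's import path, even without any device DMA. */
        attach = dma_buf_attach(dmabuf, dev);
        if (IS_ERR(attach))
                return PTR_ERR(attach);

        /* All the importer actually needs is a kernel (CPU) mapping. */
        ret = dma_buf_vmap(dmabuf, &map);
        if (ret) {
                dma_buf_detach(dmabuf, attach);
                return ret;
        }

        /* ... access the buffer through map.vaddr ... */

        dma_buf_vunmap(dmabuf, &map);
        dma_buf_detach(dmabuf, attach);
        return 0;
}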
Better Testing
==============
@@ -182,6 +182,9 @@ is dependent on the CPU capability and the kernel configuration. The limit can
be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION
ioctl() at run-time.

Creation of the VM will fail if the requested IPA size (whether it is
implicit or explicit) is unsupported on the host.

Please note that configuring the IPA size does not affect the capability
exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects
size of the address translated by the stage2 level (guest physical to
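For reference, a minimal userspace sketch of the capability check described in the documentation hunk above, assuming only the standard KVM UAPI in <linux/kvm.h> (KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE and the KVM_VM_TYPE_ARM_IPA_SIZE() machine-type encoding); error handling is reduced to the minimum.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper, not taken from this merge. */
int create_vm_with_ipa(int ipa_bits)
{
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0)
                return -1;

        /* Maximum supported IPA size in bits; 0 means the capability is absent. */
        int max_ipa = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
        if (max_ipa <= 0 || ipa_bits > max_ipa) {
                fprintf(stderr, "IPA size %d not supported (host max %d)\n",
                        ipa_bits, max_ipa);
                return -1;
        }

        /* VM creation fails if the requested IPA size is unsupported. */
        return ioctl(kvm, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(ipa_bits));
}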
@@ -261,8 +261,8 @@ ABI/API
L: linux-api@vger.kernel.org
F: include/linux/syscalls.h
F: kernel/sys_ni.c
F: include/uapi/
F: arch/*/include/uapi/
X: include/uapi/
X: arch/*/include/uapi/

ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
M: Hans de Goede <hdegoede@redhat.com>

@@ -5835,7 +5835,7 @@ M: David Airlie <airlied@linux.ie>
M: Daniel Vetter <daniel@ffwll.ch>
L: dri-devel@lists.freedesktop.org
S: Maintained
B: https://bugs.freedesktop.org/
B: https://gitlab.freedesktop.org/drm
C: irc://chat.freenode.net/dri-devel
T: git git://anongit.freedesktop.org/drm/drm
F: Documentation/devicetree/bindings/display/

@@ -19166,7 +19166,7 @@ S: Maintained
F: drivers/infiniband/hw/vmw_pvrdma/

VMware PVSCSI driver
M: Jim Gill <jgill@vmware.com>
M: Vishal Bhakta <vbhakta@vmware.com>
M: VMware PV-Drivers <pv-drivers@vmware.com>
L: linux-scsi@vger.kernel.org
S: Maintained
Makefile

@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 12
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Frozen Wasteland

# *DOCUMENTATION*

@@ -264,7 +264,8 @@ no-dot-config-targets := $(clean-targets) \
$(version_h) headers headers_% archheaders archscripts \
%asm-generic kernelversion %src-pkg dt_binding_check \
outputmakefile
no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease
no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease \
image_name
single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/

config-build :=

@@ -478,6 +479,7 @@ USERINCLUDE := \
-I$(objtree)/arch/$(SRCARCH)/include/generated/uapi \
-I$(srctree)/include/uapi \
-I$(objtree)/include/generated/uapi \
-include $(srctree)/include/linux/compiler-version.h \
-include $(srctree)/include/linux/kconfig.h

# Use LINUXINCLUDE when you must reference the include/ directory.
@@ -632,13 +632,12 @@ config HAS_LTO_CLANG
def_bool y
# Clang >= 11: https://github.com/ClangBuiltLinux/linux/issues/510
depends on CC_IS_CLANG && CLANG_VERSION >= 110000 && LD_IS_LLD
depends on $(success,test $(LLVM) -eq 1)
depends on $(success,test $(LLVM_IAS) -eq 1)
depends on $(success,$(NM) --help | head -n 1 | grep -qi llvm)
depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm)
depends on ARCH_SUPPORTS_LTO_CLANG
depends on !FTRACE_MCOUNT_USE_RECORDMCOUNT
depends on !KASAN
depends on !KASAN || KASAN_HW_TAGS
depends on !GCOV_KERNEL
help
The compiler and Kconfig options support building with Clang's

@@ -348,6 +348,7 @@ config ARCH_EP93XX
select ARM_AMBA
imply ARM_PATCH_PHYS_VIRT
select ARM_VIC
select GENERIC_IRQ_MULTI_HANDLER
select AUTO_ZRELADDR
select CLKDEV_LOOKUP
select CLKSRC_MMIO
@ -11,6 +11,7 @@
|
|||
|
||||
#include <xen/xen.h>
|
||||
#include <xen/interface/memory.h>
|
||||
#include <xen/grant_table.h>
|
||||
#include <xen/page.h>
|
||||
#include <xen/swiotlb-xen.h>
@ -109,7 +110,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
|
|||
map_ops[i].status = GNTST_general_error;
|
||||
unmap.host_addr = map_ops[i].host_addr,
|
||||
unmap.handle = map_ops[i].handle;
|
||||
map_ops[i].handle = ~0;
|
||||
map_ops[i].handle = INVALID_GRANT_HANDLE;
|
||||
if (map_ops[i].flags & GNTMAP_device_map)
|
||||
unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
|
||||
else
@ -130,7 +131,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
|
||||
|
||||
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
|
||||
struct gnttab_unmap_grant_ref *kunmap_ops,
@ -145,7 +145,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
|
||||
|
||||
bool __set_phys_to_machine_multi(unsigned long pfn,
|
||||
unsigned long mfn, unsigned long nr_pages)
@ -1055,8 +1055,6 @@ config HW_PERF_EVENTS
|
|||
config SYS_SUPPORTS_HUGETLBFS
|
||||
def_bool y
|
||||
|
||||
config ARCH_WANT_HUGE_PMD_SHARE
|
||||
|
||||
config ARCH_HAS_CACHE_LINE_SIZE
|
||||
def_bool y
@ -1157,8 +1155,8 @@ config XEN
|
|||
|
||||
config FORCE_MAX_ZONEORDER
|
||||
int
|
||||
default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
|
||||
default "12" if (ARM64_16K_PAGES && TRANSPARENT_HUGEPAGE)
|
||||
default "14" if ARM64_64K_PAGES
|
||||
default "12" if ARM64_16K_PAGES
|
||||
default "11"
|
||||
help
|
||||
The kernel memory allocator divides physically contiguous memory
@ -1855,12 +1853,6 @@ config CMDLINE_FROM_BOOTLOADER
|
|||
the boot loader doesn't provide any, the default kernel command
|
||||
string provided in CMDLINE will be used.
|
||||
|
||||
config CMDLINE_EXTEND
|
||||
bool "Extend bootloader kernel arguments"
|
||||
help
|
||||
The command-line arguments provided by the boot loader will be
|
||||
appended to the default kernel command string.
|
||||
|
||||
config CMDLINE_FORCE
|
||||
bool "Always use the default kernel command string"
|
||||
help
@ -47,10 +47,10 @@
|
|||
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context 2
|
||||
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa 3
|
||||
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid 4
|
||||
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid 5
|
||||
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context 5
|
||||
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff 6
|
||||
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs 7
|
||||
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2 8
|
||||
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config 8
|
||||
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr 9
|
||||
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr 10
|
||||
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs 11
@ -183,16 +183,16 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
|
|||
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
|
||||
|
||||
extern void __kvm_flush_vm_context(void);
|
||||
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
|
||||
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
|
||||
int level);
|
||||
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
|
||||
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
|
||||
|
||||
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
|
||||
|
||||
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
|
||||
|
||||
extern u64 __vgic_v3_get_ich_vtr_el2(void);
|
||||
extern u64 __vgic_v3_get_gic_config(void);
|
||||
extern u64 __vgic_v3_read_vmcr(void);
|
||||
extern void __vgic_v3_write_vmcr(u32 vmcr);
|
||||
extern void __vgic_v3_init_lrs(void);
@ -83,6 +83,11 @@ void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
|
|||
void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
|
||||
void __debug_switch_to_host(struct kvm_vcpu *vcpu);
|
||||
|
||||
#ifdef __KVM_NVHE_HYPERVISOR__
|
||||
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
|
||||
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
|
||||
#endif
|
||||
|
||||
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
|
||||
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
@ -97,7 +102,8 @@ bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
|
|||
|
||||
void __noreturn hyp_panic(void);
|
||||
#ifdef __KVM_NVHE_HYPERVISOR__
|
||||
void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
|
||||
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
|
||||
u64 elr, u64 par);
|
||||
#endif
|
||||
|
||||
#endif /* __ARM64_KVM_HYP_H__ */
@ -328,6 +328,11 @@ static inline void *phys_to_virt(phys_addr_t x)
|
|||
#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
|
||||
|
||||
#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
|
||||
#define page_to_virt(x) ({ \
|
||||
__typeof__(x) __page = x; \
|
||||
void *__addr = __va(page_to_phys(__page)); \
|
||||
(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
|
||||
})
|
||||
#define virt_to_page(x) pfn_to_page(virt_to_pfn(x))
|
||||
#else
|
||||
#define page_to_virt(x) ({ \
@ -63,23 +63,6 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
|
|||
extern u64 idmap_t0sz;
|
||||
extern u64 idmap_ptrs_per_pgd;
|
||||
|
||||
static inline bool __cpu_uses_extended_idmap(void)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
|
||||
return false;
|
||||
|
||||
return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
|
||||
}
|
||||
|
||||
/*
|
||||
* True if the extended ID map requires an extra level of translation table
|
||||
* to be configured.
|
||||
*/
|
||||
static inline bool __cpu_uses_extended_idmap_level(void)
|
||||
{
|
||||
return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure TCR.T0SZ is set to the provided value.
|
||||
*/
@ -66,7 +66,6 @@ extern bool arm64_use_ng_mappings;
|
|||
#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
|
||||
|
||||
#define PAGE_KERNEL __pgprot(PROT_NORMAL)
|
||||
#define PAGE_KERNEL_TAGGED __pgprot(PROT_NORMAL_TAGGED)
|
||||
#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
|
||||
#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
|
||||
#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
@ -486,6 +486,9 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
|
|||
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
|
||||
#define pgprot_device(prot) \
|
||||
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
|
||||
#define pgprot_tagged(prot) \
|
||||
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
|
||||
#define pgprot_mhp pgprot_tagged
|
||||
/*
|
||||
* DMA allocations for non-coherent devices use what the Arm architecture calls
|
||||
* "Normal non-cacheable" memory, which permits speculation, unaligned accesses
@ -796,6 +796,11 @@
|
|||
#define ID_AA64MMFR0_PARANGE_48 0x5
|
||||
#define ID_AA64MMFR0_PARANGE_52 0x6
|
||||
|
||||
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0
|
||||
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1
|
||||
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2
|
||||
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7
|
||||
|
||||
#ifdef CONFIG_ARM64_PA_BITS_52
|
||||
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
|
||||
#else
|
||||
|
@ -961,14 +966,17 @@
|
|||
#define ID_PFR1_PROGMOD_SHIFT 0
|
||||
|
||||
#if defined(CONFIG_ARM64_4K_PAGES)
|
||||
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
|
||||
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN4_SUPPORTED
|
||||
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
|
||||
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED
|
||||
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
|
||||
#elif defined(CONFIG_ARM64_16K_PAGES)
|
||||
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
|
||||
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN16_SUPPORTED
|
||||
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
|
||||
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED
|
||||
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0xF
|
||||
#elif defined(CONFIG_ARM64_64K_PAGES)
|
||||
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
|
||||
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN64_SUPPORTED
|
||||
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
|
||||
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED
|
||||
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
|
||||
#endif
|
||||
|
||||
#define MVFR2_FPMISC_SHIFT 4
@ -319,7 +319,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
|
|||
*/
|
||||
adrp x5, __idmap_text_end
|
||||
clz x5, x5
|
||||
cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
|
||||
cmp x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
|
||||
b.ge 1f // .. then skip VA range extension
|
||||
|
||||
adr_l x6, idmap_t0sz
|
||||
|
@ -655,8 +655,10 @@ SYM_FUNC_END(__secondary_too_slow)
|
|||
SYM_FUNC_START(__enable_mmu)
|
||||
mrs x2, ID_AA64MMFR0_EL1
|
||||
ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
|
||||
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
|
||||
b.ne __no_granule_support
|
||||
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
|
||||
b.lt __no_granule_support
|
||||
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
|
||||
b.gt __no_granule_support
|
||||
update_early_cpu_boot_status 0, x2, x3
|
||||
adrp x2, idmap_pg_dir
|
||||
phys_to_ttbr x1, x1
@ -163,33 +163,36 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
|
|||
} while (1);
|
||||
}
|
||||
|
||||
static __init const u8 *get_bootargs_cmdline(void)
|
||||
{
|
||||
const u8 *prop;
|
||||
void *fdt;
|
||||
int node;
|
||||
|
||||
fdt = get_early_fdt_ptr();
|
||||
if (!fdt)
|
||||
return NULL;
|
||||
|
||||
node = fdt_path_offset(fdt, "/chosen");
|
||||
if (node < 0)
|
||||
return NULL;
|
||||
|
||||
prop = fdt_getprop(fdt, node, "bootargs", NULL);
|
||||
if (!prop)
|
||||
return NULL;
|
||||
|
||||
return strlen(prop) ? prop : NULL;
|
||||
}
|
||||
|
||||
static __init void parse_cmdline(void)
|
||||
{
|
||||
if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
|
||||
const u8 *prop;
|
||||
void *fdt;
|
||||
int node;
|
||||
const u8 *prop = get_bootargs_cmdline();
|
||||
|
||||
fdt = get_early_fdt_ptr();
|
||||
if (!fdt)
|
||||
goto out;
|
||||
|
||||
node = fdt_path_offset(fdt, "/chosen");
|
||||
if (node < 0)
|
||||
goto out;
|
||||
|
||||
prop = fdt_getprop(fdt, node, "bootargs", NULL);
|
||||
if (!prop)
|
||||
goto out;
|
||||
if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !prop)
|
||||
__parse_cmdline(CONFIG_CMDLINE, true);
|
||||
|
||||
if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && prop)
|
||||
__parse_cmdline(prop, true);
|
||||
|
||||
if (!IS_ENABLED(CONFIG_CMDLINE_EXTEND))
|
||||
return;
|
||||
}
|
||||
|
||||
out:
|
||||
__parse_cmdline(CONFIG_CMDLINE, true);
|
||||
}
|
||||
|
||||
/* Keep checkers quiet */
@ -101,6 +101,9 @@ KVM_NVHE_ALIAS(__stop___kvm_ex_table);
|
|||
/* Array containing bases of nVHE per-CPU memory regions. */
|
||||
KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);
|
||||
|
||||
/* PMU available static key */
|
||||
KVM_NVHE_ALIAS(kvm_arm_pmu_available);
|
||||
|
||||
#endif /* CONFIG_KVM */
|
||||
|
||||
#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
@ -460,7 +460,7 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
|
|||
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
|
||||
}
|
||||
|
||||
static inline u32 armv8pmu_read_evcntr(int idx)
|
||||
static inline u64 armv8pmu_read_evcntr(int idx)
|
||||
{
|
||||
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
@ -385,11 +385,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
|||
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
|
||||
|
||||
/*
|
||||
* We guarantee that both TLBs and I-cache are private to each
|
||||
* vcpu. If detecting that a vcpu from the same VM has
|
||||
* previously run on the same physical CPU, call into the
|
||||
* hypervisor code to nuke the relevant contexts.
|
||||
*
|
||||
* We might get preempted before the vCPU actually runs, but
|
||||
* over-invalidation doesn't affect correctness.
|
||||
*/
|
||||
if (*last_ran != vcpu->vcpu_id) {
|
||||
kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
|
||||
kvm_call_hyp(__kvm_flush_cpu_context, mmu);
|
||||
*last_ran = vcpu->vcpu_id;
|
||||
}
|
||||
|
||||
|
|
|
@ -85,8 +85,10 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
|
|||
|
||||
// If the hyp context is loaded, go straight to hyp_panic
|
||||
get_loaded_vcpu x0, x1
|
||||
cbz x0, hyp_panic
|
||||
cbnz x0, 1f
|
||||
b hyp_panic
|
||||
|
||||
1:
|
||||
// The hyp context is saved so make sure it is restored to allow
|
||||
// hyp_panic to run at hyp and, subsequently, panic to run in the host.
|
||||
// This makes use of __guest_exit to avoid duplication but sets the
|
||||
|
@ -94,7 +96,7 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
|
|||
// current state is saved to the guest context but it will only be
|
||||
// accurate if the guest had been completely restored.
|
||||
adr_this_cpu x0, kvm_hyp_ctxt, x1
|
||||
adr x1, hyp_panic
|
||||
adr_l x1, hyp_panic
|
||||
str x1, [x0, #CPU_XREG_OFFSET(30)]
|
||||
|
||||
get_vcpu_ptr x1, x0
|
||||
|
@ -146,7 +148,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
|
|||
// Now restore the hyp regs
|
||||
restore_callee_saved_regs x2
|
||||
|
||||
set_loaded_vcpu xzr, x1, x2
|
||||
set_loaded_vcpu xzr, x2, x3
|
||||
|
||||
alternative_if ARM64_HAS_RAS_EXTN
|
||||
// If we have the RAS extensions we can consume a pending error
|
||||
|
|
|
@ -90,15 +90,18 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
|
|||
* counter, which could make a PMXEVCNTR_EL0 access UNDEF at
|
||||
* EL1 instead of being trapped to EL2.
|
||||
*/
|
||||
write_sysreg(0, pmselr_el0);
|
||||
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
|
||||
if (kvm_arm_support_pmu_v3()) {
|
||||
write_sysreg(0, pmselr_el0);
|
||||
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
|
||||
}
|
||||
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
|
||||
}
|
||||
|
||||
static inline void __deactivate_traps_common(void)
|
||||
{
|
||||
write_sysreg(0, hstr_el2);
|
||||
write_sysreg(0, pmuserenr_el0);
|
||||
if (kvm_arm_support_pmu_v3())
|
||||
write_sysreg(0, pmuserenr_el0);
|
||||
}
|
||||
|
||||
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
|
||||
|
|
|
@ -58,16 +58,24 @@ static void __debug_restore_spe(u64 pmscr_el1)
|
|||
write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
|
||||
}
|
||||
|
||||
void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
|
||||
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
/* Disable and flush SPE data generation */
|
||||
__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
|
||||
}
|
||||
|
||||
void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
__debug_switch_to_guest_common(vcpu);
|
||||
}
|
||||
|
||||
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
|
||||
}
|
||||
|
||||
void __debug_switch_to_host(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
|
||||
__debug_switch_to_host_common(vcpu);
|
||||
}
|
||||
|
||||
|
|
|
@ -71,7 +71,8 @@ SYM_FUNC_START(__host_enter)
|
|||
SYM_FUNC_END(__host_enter)
|
||||
|
||||
/*
|
||||
* void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
|
||||
* void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
|
||||
* u64 elr, u64 par);
|
||||
*/
|
||||
SYM_FUNC_START(__hyp_do_panic)
|
||||
/* Prepare and exit to the host's panic funciton. */
|
||||
|
@ -82,9 +83,11 @@ SYM_FUNC_START(__hyp_do_panic)
|
|||
hyp_kimg_va lr, x6
|
||||
msr elr_el2, lr
|
||||
|
||||
/* Set the panic format string. Use the, now free, LR as scratch. */
|
||||
ldr lr, =__hyp_panic_string
|
||||
hyp_kimg_va lr, x6
|
||||
mov x29, x0
|
||||
|
||||
/* Load the format string into x0 and arguments into x1-7 */
|
||||
ldr x0, =__hyp_panic_string
|
||||
hyp_kimg_va x0, x6
|
||||
|
||||
/* Load the format arguments into x1-7. */
|
||||
mov x6, x3
|
||||
|
@ -94,9 +97,7 @@ SYM_FUNC_START(__hyp_do_panic)
|
|||
mrs x5, hpfar_el2
|
||||
|
||||
/* Enter the host, conditionally restoring the host context. */
|
||||
cmp x0, xzr
|
||||
mov x0, lr
|
||||
b.eq __host_enter_without_restoring
|
||||
cbz x29, __host_enter_without_restoring
|
||||
b __host_enter_for_panic
|
||||
SYM_FUNC_END(__hyp_do_panic)
|
||||
|
||||
|
|
|
@ -46,11 +46,11 @@ static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
|
|||
__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
|
||||
}
|
||||
|
||||
static void handle___kvm_tlb_flush_local_vmid(struct kvm_cpu_context *host_ctxt)
|
||||
static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
|
||||
{
|
||||
DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
|
||||
|
||||
__kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
|
||||
__kvm_flush_cpu_context(kern_hyp_va(mmu));
|
||||
}
|
||||
|
||||
static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
|
||||
|
@ -67,9 +67,9 @@ static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
|
|||
write_sysreg_el2(tmp, SYS_SCTLR);
|
||||
}
|
||||
|
||||
static void handle___vgic_v3_get_ich_vtr_el2(struct kvm_cpu_context *host_ctxt)
|
||||
static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
|
||||
{
|
||||
cpu_reg(host_ctxt, 1) = __vgic_v3_get_ich_vtr_el2();
|
||||
cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
|
||||
}
|
||||
|
||||
static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
|
||||
|
@ -115,10 +115,10 @@ static const hcall_t host_hcall[] = {
|
|||
HANDLE_FUNC(__kvm_flush_vm_context),
|
||||
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
|
||||
HANDLE_FUNC(__kvm_tlb_flush_vmid),
|
||||
HANDLE_FUNC(__kvm_tlb_flush_local_vmid),
|
||||
HANDLE_FUNC(__kvm_flush_cpu_context),
|
||||
HANDLE_FUNC(__kvm_timer_set_cntvoff),
|
||||
HANDLE_FUNC(__kvm_enable_ssbs),
|
||||
HANDLE_FUNC(__vgic_v3_get_ich_vtr_el2),
|
||||
HANDLE_FUNC(__vgic_v3_get_gic_config),
|
||||
HANDLE_FUNC(__vgic_v3_read_vmcr),
|
||||
HANDLE_FUNC(__vgic_v3_write_vmcr),
|
||||
HANDLE_FUNC(__vgic_v3_init_lrs),
|
||||
|
|
|
@ -192,6 +192,14 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
|
|||
pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
|
||||
|
||||
__sysreg_save_state_nvhe(host_ctxt);
|
||||
/*
|
||||
* We must flush and disable the SPE buffer for nVHE, as
|
||||
* the translation regime(EL1&0) is going to be loaded with
|
||||
* that of the guest. And we must do this before we change the
|
||||
* translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
|
||||
* before we load guest Stage1.
|
||||
*/
|
||||
__debug_save_host_buffers_nvhe(vcpu);
|
||||
|
||||
__adjust_pc(vcpu);
|
||||
|
||||
|
@ -234,11 +242,12 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
|
|||
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
|
||||
__fpsimd_save_fpexc32(vcpu);
|
||||
|
||||
__debug_switch_to_host(vcpu);
|
||||
/*
|
||||
* This must come after restoring the host sysregs, since a non-VHE
|
||||
* system may enable SPE here and make use of the TTBRs.
|
||||
*/
|
||||
__debug_switch_to_host(vcpu);
|
||||
__debug_restore_host_buffers_nvhe(vcpu);
|
||||
|
||||
if (pmu_switch_needed)
|
||||
__pmu_switch_to_host(host_ctxt);
|
||||
|
@ -257,7 +266,6 @@ void __noreturn hyp_panic(void)
|
|||
u64 spsr = read_sysreg_el2(SYS_SPSR);
|
||||
u64 elr = read_sysreg_el2(SYS_ELR);
|
||||
u64 par = read_sysreg_par();
|
||||
bool restore_host = true;
|
||||
struct kvm_cpu_context *host_ctxt;
|
||||
struct kvm_vcpu *vcpu;
|
||||
|
||||
|
@ -271,7 +279,7 @@ void __noreturn hyp_panic(void)
|
|||
__sysreg_restore_state_nvhe(host_ctxt);
|
||||
}
|
||||
|
||||
__hyp_do_panic(restore_host, spsr, elr, par);
|
||||
__hyp_do_panic(host_ctxt, spsr, elr, par);
|
||||
unreachable();
|
||||
}
|
||||
|
||||
|
|
|
@ -123,7 +123,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
|
|||
__tlb_switch_to_host(&cxt);
|
||||
}
|
||||
|
||||
void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
|
||||
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
|
||||
{
|
||||
struct tlb_inv_context cxt;
|
||||
|
||||
|
@ -131,6 +131,7 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
|
|||
__tlb_switch_to_guest(mmu, &cxt);
|
||||
|
||||
__tlbi(vmalle1);
|
||||
asm volatile("ic iallu");
|
||||
dsb(nsh);
|
||||
isb();
@ -223,6 +223,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
|
|||
goto out;
|
||||
|
||||
if (!table) {
|
||||
data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
|
||||
data->addr += kvm_granule_size(level);
|
||||
goto out;
|
||||
}
|
||||
|
|
|
@ -405,9 +405,45 @@ void __vgic_v3_init_lrs(void)
|
|||
__gic_v3_set_lr(0, i);
|
||||
}
|
||||
|
||||
u64 __vgic_v3_get_ich_vtr_el2(void)
|
||||
/*
|
||||
* Return the GIC CPU configuration:
|
||||
* - [31:0] ICH_VTR_EL2
|
||||
* - [62:32] RES0
|
||||
* - [63] MMIO (GICv2) capable
|
||||
*/
|
||||
u64 __vgic_v3_get_gic_config(void)
|
||||
{
|
||||
return read_gicreg(ICH_VTR_EL2);
|
||||
u64 val, sre = read_gicreg(ICC_SRE_EL1);
|
||||
unsigned long flags = 0;
|
||||
|
||||
/*
|
||||
* To check whether we have a MMIO-based (GICv2 compatible)
|
||||
* CPU interface, we need to disable the system register
|
||||
* view. To do that safely, we have to prevent any interrupt
|
||||
* from firing (which would be deadly).
|
||||
*
|
||||
* Note that this only makes sense on VHE, as interrupts are
|
||||
* already masked for nVHE as part of the exception entry to
|
||||
* EL2.
|
||||
*/
|
||||
if (has_vhe())
|
||||
flags = local_daif_save();
|
||||
|
||||
write_gicreg(0, ICC_SRE_EL1);
|
||||
isb();
|
||||
|
||||
val = read_gicreg(ICC_SRE_EL1);
|
||||
|
||||
write_gicreg(sre, ICC_SRE_EL1);
|
||||
isb();
|
||||
|
||||
if (has_vhe())
|
||||
local_daif_restore(flags);
|
||||
|
||||
val = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
|
||||
val |= read_gicreg(ICH_VTR_EL2);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
u64 __vgic_v3_read_vmcr(void)
|
||||
|
|
|
@ -127,7 +127,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
|
|||
__tlb_switch_to_host(&cxt);
|
||||
}
|
||||
|
||||
void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
|
||||
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
|
||||
{
|
||||
struct tlb_inv_context cxt;
|
||||
|
||||
|
@ -135,6 +135,7 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
|
|||
__tlb_switch_to_guest(mmu, &cxt);
|
||||
|
||||
__tlbi(vmalle1);
|
||||
asm volatile("ic iallu");
|
||||
dsb(nsh);
|
||||
isb();
|
||||
|
||||
|
|
|
@ -1312,8 +1312,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|||
* Prevent userspace from creating a memory region outside of the IPA
|
||||
* space addressable by the KVM guest IPA space.
|
||||
*/
|
||||
if (memslot->base_gfn + memslot->npages >=
|
||||
(kvm_phys_size(kvm) >> PAGE_SHIFT))
|
||||
if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
|
||||
return -EFAULT;
|
||||
|
||||
mmap_read_lock(current->mm);
|
||||
|
|
|
@ -11,6 +11,8 @@
|
|||
|
||||
#include <asm/kvm_emulate.h>
|
||||
|
||||
DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
|
||||
|
||||
static int kvm_is_in_guest(void)
|
||||
{
|
||||
return kvm_get_running_vcpu() != NULL;
|
||||
|
@ -48,6 +50,14 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
|
|||
|
||||
int kvm_perf_init(void)
|
||||
{
|
||||
/*
|
||||
* Check if HW_PERF_EVENTS are supported by checking the number of
|
||||
* hardware performance counters. This could ensure the presence of
|
||||
* a physical PMU and CONFIG_PERF_EVENT is selected.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_ARM_PMU) && perf_num_counters() > 0)
|
||||
static_branch_enable(&kvm_arm_pmu_available);
|
||||
|
||||
return perf_register_guest_info_callbacks(&kvm_guest_cbs);
|
||||
}
|
||||
|
||||
|
|
|
@ -823,16 +823,6 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
|
|||
return val & mask;
|
||||
}
|
||||
|
||||
bool kvm_arm_support_pmu_v3(void)
|
||||
{
|
||||
/*
|
||||
* Check if HW_PERF_EVENTS are supported by checking the number of
|
||||
* hardware performance counters. This could ensure the presence of
|
||||
* a physical PMU and CONFIG_PERF_EVENT is selected.
|
||||
*/
|
||||
return (perf_num_counters() > 0);
|
||||
}
|
||||
|
||||
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!kvm_vcpu_has_pmu(vcpu))
|
||||
|
|
|
@ -311,23 +311,24 @@ int kvm_set_ipa_limit(void)
|
|||
}
|
||||
|
||||
switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
|
||||
default:
|
||||
case 1:
|
||||
case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
|
||||
kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
|
||||
return -EINVAL;
|
||||
case 0:
|
||||
case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
|
||||
kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
|
||||
break;
|
||||
case 2:
|
||||
case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
|
||||
kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
|
||||
break;
|
||||
default:
|
||||
kvm_err("Unsupported value for TGRAN_2, giving up\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
|
||||
WARN(kvm_ipa_limit < KVM_PHYS_SHIFT,
|
||||
"KVM IPA Size Limit (%d bits) is smaller than default size\n",
|
||||
kvm_ipa_limit);
|
||||
kvm_info("IPA Size Limit: %d bits\n", kvm_ipa_limit);
|
||||
kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
|
||||
((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
|
||||
" (Reduced IPA size, limited VM/VMM compatibility)" : ""));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -356,6 +357,11 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
|
|||
return -EINVAL;
|
||||
} else {
|
||||
phys_shift = KVM_PHYS_SHIFT;
|
||||
if (phys_shift > kvm_ipa_limit) {
|
||||
pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
|
||||
current->comm);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
|
||||
|
|
|
@ -574,9 +574,13 @@ early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
|
|||
*/
|
||||
int vgic_v3_probe(const struct gic_kvm_info *info)
|
||||
{
|
||||
u32 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
|
||||
u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
|
||||
bool has_v2;
|
||||
int ret;
|
||||
|
||||
has_v2 = ich_vtr_el2 >> 63;
|
||||
ich_vtr_el2 = (u32)ich_vtr_el2;
|
||||
|
||||
/*
|
||||
* The ListRegs field is 5 bits, but there is an architectural
|
||||
* maximum of 16 list registers. Just ignore bit 4...
|
||||
|
@ -594,13 +598,15 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
|
|||
gicv4_enable ? "en" : "dis");
|
||||
}
|
||||
|
||||
kvm_vgic_global_state.vcpu_base = 0;
|
||||
|
||||
if (!info->vcpu.start) {
|
||||
kvm_info("GICv3: no GICV resource entry\n");
|
||||
kvm_vgic_global_state.vcpu_base = 0;
|
||||
} else if (!has_v2) {
|
||||
pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
|
||||
} else if (!PAGE_ALIGNED(info->vcpu.start)) {
|
||||
pr_warn("GICV physical address 0x%llx not page aligned\n",
|
||||
(unsigned long long)info->vcpu.start);
|
||||
kvm_vgic_global_state.vcpu_base = 0;
|
||||
} else {
|
||||
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
|
||||
kvm_vgic_global_state.can_emulate_gicv2 = true;
|
||||
|
|
|
@ -219,17 +219,40 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
|
|||
|
||||
int pfn_valid(unsigned long pfn)
|
||||
{
|
||||
phys_addr_t addr = pfn << PAGE_SHIFT;
|
||||
phys_addr_t addr = PFN_PHYS(pfn);
|
||||
|
||||
if ((addr >> PAGE_SHIFT) != pfn)
|
||||
/*
|
||||
* Ensure the upper PAGE_SHIFT bits are clear in the
|
||||
* pfn. Else it might lead to false positives when
|
||||
* some of the upper bits are set, but the lower bits
|
||||
* match a valid pfn.
|
||||
*/
|
||||
if (PHYS_PFN(addr) != pfn)
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_SPARSEMEM
|
||||
{
|
||||
struct mem_section *ms;
|
||||
|
||||
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
|
||||
return 0;
|
||||
|
||||
if (!valid_section(__pfn_to_section(pfn)))
|
||||
ms = __pfn_to_section(pfn);
|
||||
if (!valid_section(ms))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* ZONE_DEVICE memory does not have the memblock entries.
|
||||
* memblock_is_map_memory() check for ZONE_DEVICE based
|
||||
* addresses will always fail. Even the normal hotplugged
|
||||
* memory will never have MEMBLOCK_NOMAP flag set in their
|
||||
* memblock entries. Skip memblock search for all non early
|
||||
* memory sections covering all of hotplug memory including
|
||||
* both normal and ZONE_DEVICE based.
|
||||
*/
|
||||
if (!early_section(ms))
|
||||
return pfn_section_valid(ms, pfn);
|
||||
}
|
||||
#endif
|
||||
return memblock_is_map_memory(addr);
|
||||
}
|
||||
|
|
|
@ -40,7 +40,7 @@
|
|||
#define NO_BLOCK_MAPPINGS BIT(0)
|
||||
#define NO_CONT_MAPPINGS BIT(1)
|
||||
|
||||
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
|
||||
u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
|
||||
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
|
||||
|
||||
u64 __section(".mmuoff.data.write") vabits_actual;
|
||||
|
@ -512,7 +512,8 @@ static void __init map_mem(pgd_t *pgdp)
|
|||
* if MTE is present. Otherwise, it has the same attributes as
|
||||
* PAGE_KERNEL.
|
||||
*/
|
||||
__map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags);
|
||||
__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
|
||||
flags);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -32,7 +32,7 @@ static inline void syscall_rollback(struct task_struct *task,
|
|||
static inline long syscall_get_error(struct task_struct *task,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
return regs->r10 == -1 ? regs->r8:0;
|
||||
return regs->r10 == -1 ? -regs->r8:0;
|
||||
}
|
||||
|
||||
static inline long syscall_get_return_value(struct task_struct *task,
|
||||
|
|
|
@ -2013,27 +2013,39 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
|
|||
{
|
||||
struct syscall_get_set_args *args = data;
|
||||
struct pt_regs *pt = args->regs;
|
||||
unsigned long *krbs, cfm, ndirty;
|
||||
unsigned long *krbs, cfm, ndirty, nlocals, nouts;
|
||||
int i, count;
|
||||
|
||||
if (unw_unwind_to_user(info) < 0)
|
||||
return;
|
||||
|
||||
/*
|
||||
* We get here via a few paths:
|
||||
* - break instruction: cfm is shared with caller.
|
||||
* syscall args are in out= regs, locals are non-empty.
|
||||
* - epsinstruction: cfm is set by br.call
|
||||
* locals don't exist.
|
||||
*
|
||||
* For both cases argguments are reachable in cfm.sof - cfm.sol.
|
||||
* CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
|
||||
*/
|
||||
cfm = pt->cr_ifs;
|
||||
nlocals = (cfm >> 7) & 0x7f; /* aka sol */
|
||||
nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */
|
||||
krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
|
||||
ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
|
||||
|
||||
count = 0;
|
||||
if (in_syscall(pt))
|
||||
count = min_t(int, args->n, cfm & 0x7f);
|
||||
count = min_t(int, args->n, nouts);
|
||||
|
||||
/* Iterate over outs. */
|
||||
for (i = 0; i < count; i++) {
|
||||
int j = ndirty + nlocals + i + args->i;
|
||||
if (args->rw)
|
||||
*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
|
||||
args->args[i];
|
||||
*ia64_rse_skip_regs(krbs, j) = args->args[i];
|
||||
else
|
||||
args->args[i] = *ia64_rse_skip_regs(krbs,
|
||||
ndirty + i + args->i);
|
||||
args->args[i] = *ia64_rse_skip_regs(krbs, j);
|
||||
}
|
||||
|
||||
if (!args->rw) {
|
||||
|
|
|
@ -73,9 +73,10 @@ void __patch_exception(int exc, unsigned long addr);
|
|||
#endif
|
||||
|
||||
#define OP_RT_RA_MASK 0xffff0000UL
|
||||
#define LIS_R2 0x3c020000UL
|
||||
#define ADDIS_R2_R12 0x3c4c0000UL
|
||||
#define ADDI_R2_R2 0x38420000UL
|
||||
#define LIS_R2 (PPC_INST_ADDIS | __PPC_RT(R2))
|
||||
#define ADDIS_R2_R12 (PPC_INST_ADDIS | __PPC_RT(R2) | __PPC_RA(R12))
|
||||
#define ADDI_R2_R2 (PPC_INST_ADDI | __PPC_RT(R2) | __PPC_RA(R2))
|
||||
|
||||
|
||||
static inline unsigned long ppc_function_entry(void *func)
|
||||
{
|
||||
|
|
|
@ -410,7 +410,6 @@ DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
|
|||
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
|
||||
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
|
||||
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
|
||||
DECLARE_INTERRUPT_HANDLER(unrecoverable_exception);
|
||||
DECLARE_INTERRUPT_HANDLER(WatchdogException);
|
||||
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);
|
||||
|
||||
|
@ -437,6 +436,8 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
|
|||
|
||||
DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
|
||||
|
||||
void unrecoverable_exception(struct pt_regs *regs);
|
||||
|
||||
void replay_system_reset(void);
|
||||
void replay_soft_interrupts(void);
|
||||
|
||||
|
|
|
@ -195,7 +195,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
|
|||
#define TRAP_FLAGS_MASK 0x11
|
||||
#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
|
||||
#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
|
||||
#define SET_FULL_REGS(regs) ((regs)->trap |= 1)
|
||||
#define SET_FULL_REGS(regs) ((regs)->trap &= ~1)
|
||||
#endif
|
||||
#define CHECK_FULL_REGS(regs) BUG_ON(!FULL_REGS(regs))
|
||||
#define NV_REG_POISON 0xdeadbeefdeadbeefUL
|
||||
|
@ -210,7 +210,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
|
|||
#define TRAP_FLAGS_MASK 0x1F
|
||||
#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
|
||||
#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
|
||||
#define SET_FULL_REGS(regs) ((regs)->trap |= 1)
|
||||
#define SET_FULL_REGS(regs) ((regs)->trap &= ~1)
|
||||
#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
|
||||
#define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0)
|
||||
#define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0)
|
||||
|
|
|
@ -71,6 +71,16 @@ static inline void disable_kernel_vsx(void)
|
|||
{
|
||||
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
|
||||
}
|
||||
#else
|
||||
static inline void enable_kernel_vsx(void)
|
||||
{
|
||||
BUILD_BUG();
|
||||
}
|
||||
|
||||
static inline void disable_kernel_vsx(void)
|
||||
{
|
||||
BUILD_BUG();
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SPE
|
||||
|
|
|
@ -466,7 +466,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
|
|||
|
||||
ld r10,PACAKMSR(r13) /* get MSR value for kernel */
|
||||
/* MSR[RI] is clear iff using SRR regs */
|
||||
.if IHSRR == EXC_HV_OR_STD
|
||||
.if IHSRR_IF_HVMODE
|
||||
BEGIN_FTR_SECTION
|
||||
xori r10,r10,MSR_RI
|
||||
END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
|
||||
|
|
|
@ -436,7 +436,6 @@ again:
|
|||
return ret;
|
||||
}
|
||||
|
||||
void unrecoverable_exception(struct pt_regs *regs);
|
||||
void preempt_schedule_irq(void);
|
||||
|
||||
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
|
||||
|
|
|
@ -2170,7 +2170,7 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
|
|||
* in the MSR is 0. This indicates that SRR0/1 are live, and that
|
||||
* we therefore lost state by taking this exception.
|
||||
*/
|
||||
DEFINE_INTERRUPT_HANDLER(unrecoverable_exception)
|
||||
void unrecoverable_exception(struct pt_regs *regs)
|
||||
{
|
||||
pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
|
||||
regs->trap, regs->nip, regs->msr);
|
||||
|
|
|
@ -275,9 +275,9 @@ CONFIG_IP_VS_DH=m
|
|||
CONFIG_IP_VS_SH=m
|
||||
CONFIG_IP_VS_SED=m
|
||||
CONFIG_IP_VS_NQ=m
|
||||
CONFIG_IP_VS_TWOS=m
|
||||
CONFIG_IP_VS_FTP=m
|
||||
CONFIG_IP_VS_PE_SIP=m
|
||||
CONFIG_NF_TABLES_IPV4=y
|
||||
CONFIG_NFT_FIB_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=y
|
||||
CONFIG_IP_NF_IPTABLES=m
|
||||
|
@ -298,7 +298,6 @@ CONFIG_IP_NF_SECURITY=m
|
|||
CONFIG_IP_NF_ARPTABLES=m
|
||||
CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_TABLES_IPV6=y
|
||||
CONFIG_NFT_FIB_IPV6=m
|
||||
CONFIG_IP6_NF_IPTABLES=m
|
||||
CONFIG_IP6_NF_MATCH_AH=m
|
||||
|
@ -481,7 +480,6 @@ CONFIG_NLMON=m
|
|||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_VENDOR_ATHEROS is not set
|
||||
# CONFIG_NET_VENDOR_AURORA is not set
|
||||
# CONFIG_NET_VENDOR_BROADCOM is not set
|
||||
# CONFIG_NET_VENDOR_BROCADE is not set
|
||||
# CONFIG_NET_VENDOR_CADENCE is not set
|
||||
|
@ -581,7 +579,6 @@ CONFIG_VIRTIO_BALLOON=m
|
|||
CONFIG_VIRTIO_INPUT=y
|
||||
CONFIG_VHOST_NET=m
|
||||
CONFIG_VHOST_VSOCK=m
|
||||
# CONFIG_SURFACE_PLATFORMS is not set
|
||||
CONFIG_S390_CCW_IOMMU=y
|
||||
CONFIG_S390_AP_IOMMU=y
|
||||
CONFIG_EXT4_FS=y
|
||||
|
@ -635,6 +632,7 @@ CONFIG_NTFS_RW=y
|
|||
CONFIG_PROC_KCORE=y
|
||||
CONFIG_TMPFS=y
|
||||
CONFIG_TMPFS_POSIX_ACL=y
|
||||
CONFIG_TMPFS_INODE64=y
|
||||
CONFIG_HUGETLBFS=y
|
||||
CONFIG_CONFIGFS_FS=m
|
||||
CONFIG_ECRYPT_FS=m
|
||||
|
@ -714,12 +712,8 @@ CONFIG_CRYPTO_VMAC=m
|
|||
CONFIG_CRYPTO_CRC32=m
|
||||
CONFIG_CRYPTO_BLAKE2S=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
CONFIG_CRYPTO_RMD128=m
|
||||
CONFIG_CRYPTO_RMD160=m
|
||||
CONFIG_CRYPTO_RMD256=m
|
||||
CONFIG_CRYPTO_RMD320=m
|
||||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
|
@ -731,7 +725,6 @@ CONFIG_CRYPTO_CAST6=m
|
|||
CONFIG_CRYPTO_DES=m
|
||||
CONFIG_CRYPTO_FCRYPT=m
|
||||
CONFIG_CRYPTO_KHAZAD=m
|
||||
CONFIG_CRYPTO_SALSA20=m
|
||||
CONFIG_CRYPTO_SEED=m
|
||||
CONFIG_CRYPTO_SERPENT=m
|
||||
CONFIG_CRYPTO_SM4=m
|
||||
|
@ -796,12 +789,9 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
|
|||
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
|
||||
CONFIG_SLUB_DEBUG_ON=y
|
||||
CONFIG_SLUB_STATS=y
|
||||
CONFIG_DEBUG_KMEMLEAK=y
|
||||
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
|
||||
CONFIG_DEBUG_STACK_USAGE=y
|
||||
CONFIG_DEBUG_VM=y
|
||||
CONFIG_DEBUG_VM_VMACACHE=y
|
||||
CONFIG_DEBUG_VM_RB=y
|
||||
CONFIG_DEBUG_VM_PGFLAGS=y
|
||||
CONFIG_DEBUG_MEMORY_INIT=y
|
||||
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
|
||||
|
@ -838,6 +828,7 @@ CONFIG_BPF_KPROBE_OVERRIDE=y
|
|||
CONFIG_HIST_TRIGGERS=y
|
||||
CONFIG_FTRACE_STARTUP_TEST=y
|
||||
# CONFIG_EVENT_TRACE_STARTUP_TEST is not set
|
||||
CONFIG_DEBUG_ENTRY=y
|
||||
CONFIG_NOTIFIER_ERROR_INJECTION=m
|
||||
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
|
||||
CONFIG_FAULT_INJECTION=y
|
||||
|
@ -861,4 +852,3 @@ CONFIG_PERCPU_TEST=m
|
|||
CONFIG_ATOMIC64_SELFTEST=y
|
||||
CONFIG_TEST_BITOPS=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_DEBUG_ENTRY=y
|
||||
|
|
|
@ -266,9 +266,9 @@ CONFIG_IP_VS_DH=m
|
|||
CONFIG_IP_VS_SH=m
|
||||
CONFIG_IP_VS_SED=m
|
||||
CONFIG_IP_VS_NQ=m
|
||||
CONFIG_IP_VS_TWOS=m
|
||||
CONFIG_IP_VS_FTP=m
|
||||
CONFIG_IP_VS_PE_SIP=m
|
||||
CONFIG_NF_TABLES_IPV4=y
|
||||
CONFIG_NFT_FIB_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=y
|
||||
CONFIG_IP_NF_IPTABLES=m
|
||||
|
@ -289,7 +289,6 @@ CONFIG_IP_NF_SECURITY=m
|
|||
CONFIG_IP_NF_ARPTABLES=m
|
||||
CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_TABLES_IPV6=y
|
||||
CONFIG_NFT_FIB_IPV6=m
|
||||
CONFIG_IP6_NF_IPTABLES=m
|
||||
CONFIG_IP6_NF_MATCH_AH=m
|
||||
|
@ -473,7 +472,6 @@ CONFIG_NLMON=m
|
|||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_VENDOR_ATHEROS is not set
|
||||
# CONFIG_NET_VENDOR_AURORA is not set
|
||||
# CONFIG_NET_VENDOR_BROADCOM is not set
|
||||
# CONFIG_NET_VENDOR_BROCADE is not set
|
||||
# CONFIG_NET_VENDOR_CADENCE is not set
|
||||
|
@ -573,7 +571,6 @@ CONFIG_VIRTIO_BALLOON=m
|
|||
CONFIG_VIRTIO_INPUT=y
|
||||
CONFIG_VHOST_NET=m
|
||||
CONFIG_VHOST_VSOCK=m
|
||||
# CONFIG_SURFACE_PLATFORMS is not set
|
||||
CONFIG_S390_CCW_IOMMU=y
|
||||
CONFIG_S390_AP_IOMMU=y
|
||||
CONFIG_EXT4_FS=y
|
||||
|
@ -623,6 +620,7 @@ CONFIG_NTFS_RW=y
|
|||
CONFIG_PROC_KCORE=y
|
||||
CONFIG_TMPFS=y
|
||||
CONFIG_TMPFS_POSIX_ACL=y
|
||||
CONFIG_TMPFS_INODE64=y
|
||||
CONFIG_HUGETLBFS=y
|
||||
CONFIG_CONFIGFS_FS=m
|
||||
CONFIG_ECRYPT_FS=m
|
||||
|
@ -703,12 +701,8 @@ CONFIG_CRYPTO_VMAC=m
|
|||
CONFIG_CRYPTO_CRC32=m
|
||||
CONFIG_CRYPTO_BLAKE2S=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
CONFIG_CRYPTO_RMD128=m
|
||||
CONFIG_CRYPTO_RMD160=m
|
||||
CONFIG_CRYPTO_RMD256=m
|
||||
CONFIG_CRYPTO_RMD320=m
|
||||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
|
@ -720,7 +714,6 @@ CONFIG_CRYPTO_CAST6=m
|
|||
CONFIG_CRYPTO_DES=m
|
||||
CONFIG_CRYPTO_FCRYPT=m
|
||||
CONFIG_CRYPTO_KHAZAD=m
|
||||
CONFIG_CRYPTO_SALSA20=m
|
||||
CONFIG_CRYPTO_SEED=m
|
||||
CONFIG_CRYPTO_SERPENT=m
|
||||
CONFIG_CRYPTO_SM4=m
|
||||
|
|
|
@ -26,7 +26,6 @@ CONFIG_CRASH_DUMP=y
|
|||
# CONFIG_SECCOMP is not set
|
||||
# CONFIG_GCC_PLUGINS is not set
|
||||
CONFIG_PARTITION_ADVANCED=y
|
||||
CONFIG_IBM_PARTITION=y
|
||||
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
|
||||
# CONFIG_COMPACTION is not set
|
||||
# CONFIG_MIGRATION is not set
|
||||
|
@ -61,11 +60,9 @@ CONFIG_RAW_DRIVER=y
|
|||
# CONFIG_HID is not set
|
||||
# CONFIG_VIRTIO_MENU is not set
|
||||
# CONFIG_VHOST_MENU is not set
|
||||
# CONFIG_SURFACE_PLATFORMS is not set
|
||||
# CONFIG_IOMMU_SUPPORT is not set
|
||||
# CONFIG_DNOTIFY is not set
|
||||
# CONFIG_INOTIFY_USER is not set
|
||||
CONFIG_CONFIGFS_FS=y
|
||||
# CONFIG_MISC_FILESYSTEMS is not set
|
||||
# CONFIG_NETWORK_FILESYSTEMS is not set
|
||||
CONFIG_LSM="yama,loadpin,safesetid,integrity"
|
||||
|
|
|
@ -14,12 +14,12 @@
|
|||
|
||||
struct s390_idle_data {
|
||||
seqcount_t seqcount;
|
||||
unsigned long long idle_count;
|
||||
unsigned long long idle_time;
|
||||
unsigned long long clock_idle_enter;
|
||||
unsigned long long clock_idle_exit;
|
||||
unsigned long long timer_idle_enter;
|
||||
unsigned long long timer_idle_exit;
|
||||
unsigned long idle_count;
|
||||
unsigned long idle_time;
|
||||
unsigned long clock_idle_enter;
|
||||
unsigned long clock_idle_exit;
|
||||
unsigned long timer_idle_enter;
|
||||
unsigned long timer_idle_exit;
|
||||
unsigned long mt_cycles_enter[8];
|
||||
};
|
||||
|
||||
|
|
|
@ -98,10 +98,10 @@ extern unsigned char ptff_function_mask[16];
|
|||
|
||||
/* Query TOD offset result */
|
||||
struct ptff_qto {
|
||||
unsigned long long physical_clock;
|
||||
unsigned long long tod_offset;
|
||||
unsigned long long logical_tod_offset;
|
||||
unsigned long long tod_epoch_difference;
|
||||
unsigned long physical_clock;
|
||||
unsigned long tod_offset;
|
||||
unsigned long logical_tod_offset;
|
||||
unsigned long tod_epoch_difference;
|
||||
} __packed;
|
||||
|
||||
static inline int ptff_query(unsigned int nr)
|
||||
|
@ -151,9 +151,9 @@ struct ptff_qui {
|
|||
rc; \
|
||||
})
|
||||
|
||||
static inline unsigned long long local_tick_disable(void)
|
||||
static inline unsigned long local_tick_disable(void)
|
||||
{
|
||||
unsigned long long old;
|
||||
unsigned long old;
|
||||
|
||||
old = S390_lowcore.clock_comparator;
|
||||
S390_lowcore.clock_comparator = clock_comparator_max;
|
||||
|
@ -161,7 +161,7 @@ static inline unsigned long long local_tick_disable(void)
|
|||
return old;
|
||||
}
|
||||
|
||||
static inline void local_tick_enable(unsigned long long comp)
|
||||
static inline void local_tick_enable(unsigned long comp)
|
||||
{
|
||||
S390_lowcore.clock_comparator = comp;
|
||||
set_clock_comparator(S390_lowcore.clock_comparator);
|
||||
|
@ -169,9 +169,9 @@ static inline void local_tick_enable(unsigned long long comp)
|
|||
|
||||
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
|
||||
|
||||
typedef unsigned long long cycles_t;
|
||||
typedef unsigned long cycles_t;
|
||||
|
||||
static inline unsigned long long get_tod_clock(void)
|
||||
static inline unsigned long get_tod_clock(void)
|
||||
{
|
||||
union tod_clock clk;
|
||||
|
||||
|
@ -179,10 +179,10 @@ static inline unsigned long long get_tod_clock(void)
|
|||
return clk.tod;
|
||||
}
|
||||
|
||||
static inline unsigned long long get_tod_clock_fast(void)
|
||||
static inline unsigned long get_tod_clock_fast(void)
|
||||
{
|
||||
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
|
||||
unsigned long long clk;
|
||||
unsigned long clk;
|
||||
|
||||
asm volatile("stckf %0" : "=Q" (clk) : : "cc");
|
||||
return clk;
|
||||
|
@ -208,9 +208,9 @@ extern union tod_clock tod_clock_base;
|
|||
* Therefore preemption must be disabled, otherwise the returned
|
||||
* value is not guaranteed to be monotonic.
|
||||
*/
|
||||
static inline unsigned long long get_tod_clock_monotonic(void)
|
||||
static inline unsigned long get_tod_clock_monotonic(void)
|
||||
{
|
||||
unsigned long long tod;
|
||||
unsigned long tod;
|
||||
|
||||
preempt_disable_notrace();
|
||||
tod = get_tod_clock() - tod_clock_base.tod;
|
||||
|
@ -237,7 +237,7 @@ static inline unsigned long long get_tod_clock_monotonic(void)
|
|||
* -> ns = (th * 125) + ((tl * 125) >> 9);
|
||||
*
|
||||
*/
|
||||
static inline unsigned long long tod_to_ns(unsigned long long todval)
|
||||
static inline unsigned long tod_to_ns(unsigned long todval)
|
||||
{
|
||||
return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
|
||||
}
|
||||
|
@ -249,10 +249,10 @@ static inline unsigned long long tod_to_ns(unsigned long long todval)
|
|||
*
|
||||
* Returns: true if a is later than b
|
||||
*/
|
||||
static inline int tod_after(unsigned long long a, unsigned long long b)
|
||||
static inline int tod_after(unsigned long a, unsigned long b)
|
||||
{
|
||||
if (MACHINE_HAS_SCC)
|
||||
return (long long) a > (long long) b;
|
||||
return (long) a > (long) b;
|
||||
return a > b;
|
||||
}
|
||||
|
||||
|
@ -263,10 +263,10 @@ static inline int tod_after(unsigned long long a, unsigned long long b)
|
|||
*
|
||||
* Returns: true if a is later than b
|
||||
*/
|
||||
static inline int tod_after_eq(unsigned long long a, unsigned long long b)
|
||||
static inline int tod_after_eq(unsigned long a, unsigned long b)
|
||||
{
|
||||
if (MACHINE_HAS_SCC)
|
||||
return (long long) a >= (long long) b;
|
||||
return (long) a >= (long) b;
|
||||
return a >= b;
|
||||
}
|
||||
|
||||
|
|
|
@ -47,7 +47,7 @@ void account_idle_time_irq(void)
|
|||
void arch_cpu_idle(void)
|
||||
{
|
||||
struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
|
||||
unsigned long long idle_time;
|
||||
unsigned long idle_time;
|
||||
unsigned long psw_mask;
|
||||
|
||||
/* Wait for external, I/O or machine check interrupt. */
|
||||
|
@ -73,7 +73,7 @@ static ssize_t show_idle_count(struct device *dev,
|
|||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
|
||||
unsigned long long idle_count;
|
||||
unsigned long idle_count;
|
||||
unsigned int seq;
|
||||
|
||||
do {
|
||||
|
@ -82,14 +82,14 @@ static ssize_t show_idle_count(struct device *dev,
|
|||
if (READ_ONCE(idle->clock_idle_enter))
|
||||
idle_count++;
|
||||
} while (read_seqcount_retry(&idle->seqcount, seq));
|
||||
return sprintf(buf, "%llu\n", idle_count);
|
||||
return sprintf(buf, "%lu\n", idle_count);
|
||||
}
|
||||
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
|
||||
|
||||
static ssize_t show_idle_time(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
|
||||
unsigned long now, idle_time, idle_enter, idle_exit, in_idle;
|
||||
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
|
||||
unsigned int seq;
|
||||
|
||||
|
@ -109,14 +109,14 @@ static ssize_t show_idle_time(struct device *dev,
|
|||
}
|
||||
}
|
||||
idle_time += in_idle;
|
||||
return sprintf(buf, "%llu\n", idle_time >> 12);
|
||||
return sprintf(buf, "%lu\n", idle_time >> 12);
|
||||
}
|
||||
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
|
||||
|
||||
u64 arch_cpu_idle_time(int cpu)
|
||||
{
|
||||
struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
|
||||
unsigned long long now, idle_enter, idle_exit, in_idle;
|
||||
unsigned long now, idle_enter, idle_exit, in_idle;
|
||||
unsigned int seq;
|
||||
|
||||
do {
|
||||
|
|
|
@ -269,7 +269,7 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
|
|||
case CPUMF_CTR_SET_MAX:
|
||||
/* The counter could not be associated to a counter set */
|
||||
return -EINVAL;
|
||||
};
|
||||
}
|
||||
|
||||
/* Initialize for using the CPU-measurement counter facility */
|
||||
if (!atomic_inc_not_zero(&num_events)) {
|
||||
|
|
|
@ -26,12 +26,10 @@
|
|||
#include <asm/timex.h>
|
||||
#include <asm/debug.h>
|
||||
|
||||
#include <asm/perf_cpum_cf_diag.h>
|
||||
#include <asm/hwctrset.h>
|
||||
|
||||
#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */
|
||||
#define CF_DIAG_MIN_INTERVAL 60 /* Minimum counter set read */
|
||||
/* interval in seconds */
|
||||
static unsigned long cf_diag_interval = CF_DIAG_MIN_INTERVAL;
|
||||
static unsigned int cf_diag_cpu_speed;
|
||||
static debug_info_t *cf_diag_dbg;
|
||||
|
||||
|
@ -729,7 +727,6 @@ static DEFINE_MUTEX(cf_diag_ctrset_mutex);
|
|||
static struct cf_diag_ctrset {
|
||||
unsigned long ctrset; /* Bit mask of counter set to read */
|
||||
cpumask_t mask; /* CPU mask to read from */
|
||||
time64_t lastread; /* Epoch counter set last read */
|
||||
} cf_diag_ctrset;
|
||||
|
||||
static void cf_diag_ctrset_clear(void)
|
||||
|
@ -866,27 +863,16 @@ static int cf_diag_all_read(unsigned long arg)
|
|||
{
|
||||
struct cf_diag_call_on_cpu_parm p;
|
||||
cpumask_var_t mask;
|
||||
time64_t now;
|
||||
int rc = 0;
|
||||
int rc;
|
||||
|
||||
debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
|
||||
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
now = ktime_get_seconds();
|
||||
if (cf_diag_ctrset.lastread + cf_diag_interval > now) {
|
||||
debug_sprintf_event(cf_diag_dbg, 5, "%s now %lld "
|
||||
" lastread %lld\n", __func__, now,
|
||||
cf_diag_ctrset.lastread);
|
||||
rc = -EAGAIN;
|
||||
goto out;
|
||||
} else {
|
||||
cf_diag_ctrset.lastread = now;
|
||||
}
|
||||
|
||||
p.sets = cf_diag_ctrset.ctrset;
|
||||
cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
|
||||
on_each_cpu_mask(mask, cf_diag_cpu_read, &p, 1);
|
||||
rc = cf_diag_all_copy(arg, mask);
|
||||
out:
|
||||
free_cpumask_var(mask);
|
||||
debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d\n", __func__, rc);
|
||||
return rc;
|
||||
|
|
|
@@ -68,10 +68,10 @@ EXPORT_SYMBOL(s390_epoch_delta_notifier);

 unsigned char ptff_function_mask[16];

-static unsigned long long lpar_offset;
-static unsigned long long initial_leap_seconds;
-static unsigned long long tod_steering_end;
-static long long tod_steering_delta;
+static unsigned long lpar_offset;
+static unsigned long initial_leap_seconds;
+static unsigned long tod_steering_end;
+static long tod_steering_delta;

 /*
  * Get time offsets with PTFF

@@ -96,7 +96,7 @@ void __init time_early_init(void)

 	/* get initial leap seconds */
 	if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
-		initial_leap_seconds = (unsigned long long)
+		initial_leap_seconds = (unsigned long)
 			((long) qui.old_leap * 4096000000L);
 }
|
||||
|
||||
|
@@ -222,7 +222,7 @@ void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,

 static u64 read_tod_clock(struct clocksource *cs)
 {
-	unsigned long long now, adj;
+	unsigned long now, adj;

 	preempt_disable(); /* protect from changes to steering parameters */
 	now = get_tod_clock();

@@ -362,7 +362,7 @@ static inline int check_sync_clock(void)
  * Apply clock delta to the global data structures.
  * This is called once on the CPU that performed the clock sync.
  */
-static void clock_sync_global(unsigned long long delta)
+static void clock_sync_global(unsigned long delta)
 {
 	unsigned long now, adj;
 	struct ptff_qto qto;

@@ -378,7 +378,7 @@ static void clock_sync_global(unsigned long long delta)
 			-(adj >> 15) : (adj >> 15);
 	tod_steering_delta += delta;
 	if ((abs(tod_steering_delta) >> 48) != 0)
-		panic("TOD clock sync offset %lli is too large to drift\n",
+		panic("TOD clock sync offset %li is too large to drift\n",
 		      tod_steering_delta);
 	tod_steering_end = now + (abs(tod_steering_delta) << 15);
 	vdso_data->arch_data.tod_steering_end = tod_steering_end;

@@ -394,7 +394,7 @@ static void clock_sync_global(unsigned long long delta)
  * Apply clock delta to the per-CPU data structures of this CPU.
  * This is called for each online CPU after the call to clock_sync_global.
  */
-static void clock_sync_local(unsigned long long delta)
+static void clock_sync_local(unsigned long delta)
 {
 	/* Add the delta to the clock comparator. */
 	if (S390_lowcore.clock_comparator != clock_comparator_max) {

@@ -418,7 +418,7 @@ static void __init time_init_wq(void)
 struct clock_sync_data {
 	atomic_t cpus;
 	int in_sync;
-	unsigned long long clock_delta;
+	unsigned long clock_delta;
 };

 /*

@@ -538,7 +538,7 @@ static int stpinfo_valid(void)
 static int stp_sync_clock(void *data)
 {
 	struct clock_sync_data *sync = data;
-	unsigned long long clock_delta, flags;
+	u64 clock_delta, flags;
 	static int first;
 	int rc;

@@ -720,8 +720,8 @@ static ssize_t ctn_id_show(struct device *dev,

 	mutex_lock(&stp_mutex);
 	if (stpinfo_valid())
-		ret = sprintf(buf, "%016llx\n",
-			      *(unsigned long long *) stp_info.ctnid);
+		ret = sprintf(buf, "%016lx\n",
+			      *(unsigned long *) stp_info.ctnid);
 	mutex_unlock(&stp_mutex);
 	return ret;
 }

@@ -794,7 +794,7 @@ static ssize_t leap_seconds_scheduled_show(struct device *dev,
 	if (!stzi.lsoib.p)
 		return sprintf(buf, "0,0\n");

-	return sprintf(buf, "%llu,%d\n",
+	return sprintf(buf, "%lu,%d\n",
 		       tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
 		       stzi.lsoib.nlso - stzi.lsoib.also);
 }
|
||||
|
|
|
@ -76,8 +76,6 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
|
|||
}
|
||||
info = info->next;
|
||||
}
|
||||
if (cpumask_empty(&mask))
|
||||
cpumask_copy(&mask, cpumask_of(cpu));
|
||||
break;
|
||||
case TOPOLOGY_MODE_PACKAGE:
|
||||
cpumask_copy(&mask, cpu_present_mask);
|
||||
|
|
|
@@ -1287,7 +1287,7 @@ static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
 			/* already expired? */
 			if (cputm >> 63)
 				return 0;
-			return min(sltime, tod_to_ns(cputm));
+			return min_t(u64, sltime, tod_to_ns(cputm));
 		}
 	} else if (cpu_timer_interrupts_enabled(vcpu)) {
 		sltime = kvm_s390_get_cpu_timer(vcpu);
|
||||
|
|
|
@@ -128,7 +128,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 		regs->ax = -EFAULT;

 		instrumentation_end();
-		syscall_exit_to_user_mode(regs);
+		local_irq_disable();
+		irqentry_exit_to_user_mode(regs);
 		return false;
 	}
|
||||
|
||||
|
|
|
@@ -210,6 +210,8 @@ SYM_CODE_START(entry_SYSCALL_compat)
 	/* Switch to the kernel stack */
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

+SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL)
+
 	/* Construct struct pt_regs on stack */
 	pushq	$__USER32_DS		/* pt_regs->ss */
 	pushq	%r8			/* pt_regs->sp */
|
||||
|
|
|
@@ -81,7 +81,11 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
 DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs,   *x86_pmu.drain_pebs);
 DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);

-DEFINE_STATIC_CALL_NULL(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);
+/*
+ * This one is magic, it will get called even when PMU init fails (because
+ * there is no PMU), in which case it should simply return NULL.
+ */
+DEFINE_STATIC_CALL_RET0(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);

 u64 __read_mostly hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]

@@ -1944,13 +1948,6 @@ static void _x86_pmu_read(struct perf_event *event)
 	x86_perf_event_update(event);
 }

-static inline struct perf_guest_switch_msr *
-perf_guest_get_msrs_nop(int *nr)
-{
-	*nr = 0;
-	return NULL;
-}
-
 static int __init init_hw_perf_events(void)
 {
 	struct x86_pmu_quirk *quirk;

@@ -2025,7 +2022,7 @@ static int __init init_hw_perf_events(void)
 		x86_pmu.read = _x86_pmu_read;

 	if (!x86_pmu.guest_get_msrs)
-		x86_pmu.guest_get_msrs = perf_guest_get_msrs_nop;
+		x86_pmu.guest_get_msrs = (void *)&__static_call_return0;

 	x86_pmu_static_call_update();
|
||||
|
||||
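With the RET0 variant above, callers no longer need a hand-rolled NULL-returning stub: invoking the static call while no PMU handler is installed simply yields 0, i.e. a NULL pointer here. A rough sketch of the call-site side in C (illustrative, not the exact perf code):

	struct perf_guest_switch_msr *msrs;
	int nr_msrs;

	/*
	 * Resolves to __static_call_return0 until a real handler is
	 * registered, so msrs comes back NULL and nr_msrs is garbage.
	 */
	msrs = static_call(x86_pmu_guest_get_msrs)(&nr_msrs);
	if (!msrs)
		return;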
|
|
|
@@ -3662,8 +3662,10 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
 			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
 			if (!(event->attr.sample_type &
-			      ~intel_pmu_large_pebs_flags(event)))
+			      ~intel_pmu_large_pebs_flags(event))) {
 				event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
+				event->attach_state |= PERF_ATTACH_SCHED_CB;
+			}
 		}
 		if (x86_pmu.pebs_aliases)
 			x86_pmu.pebs_aliases(event);

@@ -3676,6 +3678,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		ret = intel_pmu_setup_lbr_filter(event);
 		if (ret)
 			return ret;
+		event->attach_state |= PERF_ATTACH_SCHED_CB;

 		/*
 		 * BTS is set up earlier in this path, so don't account twice
|
||||
|
|
|
@ -23,6 +23,8 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx);
|
|||
int insn_get_code_seg_params(struct pt_regs *regs);
|
||||
int insn_fetch_from_user(struct pt_regs *regs,
|
||||
unsigned char buf[MAX_INSN_SIZE]);
|
||||
int insn_fetch_from_user_inatomic(struct pt_regs *regs,
|
||||
unsigned char buf[MAX_INSN_SIZE]);
|
||||
bool insn_decode(struct insn *insn, struct pt_regs *regs,
|
||||
unsigned char buf[MAX_INSN_SIZE], int buf_size);
|
||||
|
||||
|
|
|
@@ -963,7 +963,7 @@ struct kvm_arch {
 	struct kvm_pit *vpit;
 	atomic_t vapics_in_nmi_mode;
 	struct mutex apic_map_lock;
-	struct kvm_apic_map *apic_map;
+	struct kvm_apic_map __rcu *apic_map;
 	atomic_t apic_map_dirty;

 	bool apic_access_page_done;

@@ -1036,7 +1036,7 @@ struct kvm_arch {

 	bool bus_lock_detection_enabled;

-	struct kvm_pmu_event_filter *pmu_event_filter;
+	struct kvm_pmu_event_filter __rcu *pmu_event_filter;
 	struct task_struct *nx_lpage_recovery_thread;

 #ifdef CONFIG_X86_64
|
||||
|
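The __rcu annotations above mainly change what sparse will accept: an __rcu pointer has to be accessed through the RCU helpers instead of being dereferenced directly. A generic sketch of that access pattern in C (assumed names and locking, not the actual KVM code):

	/* reader side */
	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);
	if (map)
		/* ... use map ... */;
	rcu_read_unlock();

	/* updater side, under some lock protecting the pointer */
	old = rcu_dereference_protected(kvm->arch.apic_map,
					lockdep_is_held(&update_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	synchronize_rcu();	/* wait for readers before freeing */
	kfree(old);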
|
|
@ -25,6 +25,7 @@ void __end_SYSENTER_singlestep_region(void);
|
|||
void entry_SYSENTER_compat(void);
|
||||
void __end_entry_SYSENTER_compat(void);
|
||||
void entry_SYSCALL_compat(void);
|
||||
void entry_SYSCALL_compat_safe_stack(void);
|
||||
void entry_INT80_compat(void);
|
||||
#ifdef CONFIG_XEN_PV
|
||||
void xen_entry_INT80_compat(void);
|
||||
|
|
|
@ -94,6 +94,8 @@ struct pt_regs {
|
|||
#include <asm/paravirt_types.h>
|
||||
#endif
|
||||
|
||||
#include <asm/proto.h>
|
||||
|
||||
struct cpuinfo_x86;
|
||||
struct task_struct;
|
||||
|
||||
|
@ -175,6 +177,19 @@ static inline bool any_64bit_mode(struct pt_regs *regs)
|
|||
#ifdef CONFIG_X86_64
|
||||
#define current_user_stack_pointer() current_pt_regs()->sp
|
||||
#define compat_user_stack_pointer() current_pt_regs()->sp
|
||||
|
||||
static inline bool ip_within_syscall_gap(struct pt_regs *regs)
|
||||
{
|
||||
bool ret = (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
|
||||
regs->ip < (unsigned long)entry_SYSCALL_64_safe_stack);
|
||||
|
||||
#ifdef CONFIG_IA32_EMULATION
|
||||
ret = ret || (regs->ip >= (unsigned long)entry_SYSCALL_compat &&
|
||||
regs->ip < (unsigned long)entry_SYSCALL_compat_safe_stack);
|
||||
#endif
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
|
||||
|
|
|
@ -58,9 +58,8 @@ static __always_inline unsigned long smap_save(void)
|
|||
unsigned long flags;
|
||||
|
||||
asm volatile ("# smap_save\n\t"
|
||||
ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP)
|
||||
"pushf; pop %0; " __ASM_CLAC "\n\t"
|
||||
"1:"
|
||||
ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC "\n\t",
|
||||
X86_FEATURE_SMAP)
|
||||
: "=rm" (flags) : : "memory", "cc");
|
||||
|
||||
return flags;
|
||||
|
@ -69,9 +68,8 @@ static __always_inline unsigned long smap_save(void)
|
|||
static __always_inline void smap_restore(unsigned long flags)
|
||||
{
|
||||
asm volatile ("# smap_restore\n\t"
|
||||
ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP)
|
||||
"push %0; popf\n\t"
|
||||
"1:"
|
||||
ALTERNATIVE("", "push %0; popf\n\t",
|
||||
X86_FEATURE_SMAP)
|
||||
: : "g" (flags) : "memory", "cc");
|
||||
}
|
||||
|
||||
|
|
|
@ -268,21 +268,20 @@ static void __init kvmclock_init_mem(void)
|
|||
|
||||
static int __init kvm_setup_vsyscall_timeinfo(void)
|
||||
{
|
||||
#ifdef CONFIG_X86_64
|
||||
u8 flags;
|
||||
|
||||
if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
|
||||
return 0;
|
||||
|
||||
flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
|
||||
if (!(flags & PVCLOCK_TSC_STABLE_BIT))
|
||||
return 0;
|
||||
|
||||
kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
|
||||
#endif
|
||||
|
||||
kvmclock_init_mem();
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
if (per_cpu(hv_clock_per_cpu, 0) && kvmclock_vsyscall) {
|
||||
u8 flags;
|
||||
|
||||
flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
|
||||
if (!(flags & PVCLOCK_TSC_STABLE_BIT))
|
||||
return 0;
|
||||
|
||||
kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
|
||||
}
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
early_initcall(kvm_setup_vsyscall_timeinfo);
|
||||
|
|
|
@ -121,8 +121,18 @@ static void __init setup_vc_stacks(int cpu)
|
|||
cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
|
||||
}
|
||||
|
||||
static __always_inline bool on_vc_stack(unsigned long sp)
|
||||
static __always_inline bool on_vc_stack(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long sp = regs->sp;
|
||||
|
||||
/* User-mode RSP is not trusted */
|
||||
if (user_mode(regs))
|
||||
return false;
|
||||
|
||||
/* SYSCALL gap still has user-mode RSP */
|
||||
if (ip_within_syscall_gap(regs))
|
||||
return false;
|
||||
|
||||
return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
|
||||
}
|
||||
|
||||
|
@ -144,7 +154,7 @@ void noinstr __sev_es_ist_enter(struct pt_regs *regs)
|
|||
old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
|
||||
|
||||
/* Make room on the IST stack */
|
||||
if (on_vc_stack(regs->sp))
|
||||
if (on_vc_stack(regs))
|
||||
new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
|
||||
else
|
||||
new_ist = old_ist - sizeof(old_ist);
|
||||
|
@ -248,7 +258,7 @@ static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
|
|||
int res;
|
||||
|
||||
if (user_mode(ctxt->regs)) {
|
||||
res = insn_fetch_from_user(ctxt->regs, buffer);
|
||||
res = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
|
||||
if (!res) {
|
||||
ctxt->fi.vector = X86_TRAP_PF;
|
||||
ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
|
||||
|
@ -1248,13 +1258,12 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
|
|||
DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
|
||||
{
|
||||
struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
|
||||
irqentry_state_t irq_state;
|
||||
struct ghcb_state state;
|
||||
struct es_em_ctxt ctxt;
|
||||
enum es_result result;
|
||||
struct ghcb *ghcb;
|
||||
|
||||
lockdep_assert_irqs_disabled();
|
||||
|
||||
/*
|
||||
* Handle #DB before calling into !noinstr code to avoid recursive #DB.
|
||||
*/
|
||||
|
@ -1263,6 +1272,8 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
|
|||
return;
|
||||
}
|
||||
|
||||
irq_state = irqentry_nmi_enter(regs);
|
||||
lockdep_assert_irqs_disabled();
|
||||
instrumentation_begin();
|
||||
|
||||
/*
|
||||
|
@ -1325,6 +1336,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
|
|||
|
||||
out:
|
||||
instrumentation_end();
|
||||
irqentry_nmi_exit(regs, irq_state);
|
||||
|
||||
return;
|
||||
|
||||
|
|
|
@ -694,8 +694,7 @@ asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *r
|
|||
* In the SYSCALL entry path the RSP value comes from user-space - don't
|
||||
* trust it and switch to the current kernel stack
|
||||
*/
|
||||
if (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
|
||||
regs->ip < (unsigned long)entry_SYSCALL_64_safe_stack) {
|
||||
if (ip_within_syscall_gap(regs)) {
|
||||
sp = this_cpu_read(cpu_current_top_of_stack);
|
||||
goto sync;
|
||||
}
|
||||
|
|
|
@@ -13,7 +13,7 @@

 #define orc_warn_current(args...)					\
 ({									\
-	if (state->task == current)					\
+	if (state->task == current && !state->error)			\
 		orc_warn(args);						\
 })

@@ -367,8 +367,8 @@ static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
 	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
 		return false;

-	*ip = regs->ip;
-	*sp = regs->sp;
+	*ip = READ_ONCE_NOCHECK(regs->ip);
+	*sp = READ_ONCE_NOCHECK(regs->sp);
 	return true;
 }

@@ -380,8 +380,8 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
 	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
 		return false;

-	*ip = regs->ip;
-	*sp = regs->sp;
+	*ip = READ_ONCE_NOCHECK(regs->ip);
+	*sp = READ_ONCE_NOCHECK(regs->sp);
 	return true;
 }

@@ -402,12 +402,12 @@ static bool get_reg(struct unwind_state *state, unsigned int reg_off,
 		return false;

 	if (state->full_regs) {
-		*val = ((unsigned long *)state->regs)[reg];
+		*val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
 		return true;
 	}

 	if (state->prev_regs) {
-		*val = ((unsigned long *)state->prev_regs)[reg];
+		*val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
 		return true;
 	}
|
||||
|
||||
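READ_ONCE_NOCHECK() gives the unwinder a single, untorn load that KASAN will not instrument, which matters when the address may point into another task's (possibly stale) stack. A hedged illustration of the difference in C (not taken from this file):

	/* plain dereference: instrumented, KASAN may flag foreign stack slots */
	val = *(unsigned long *)addr;

	/* one access, no compiler re-reads, sanitizer check suppressed */
	val = READ_ONCE_NOCHECK(*(unsigned long *)addr);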
|
|
|
@ -1642,7 +1642,16 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
|
|||
}
|
||||
|
||||
if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
|
||||
kvm_wait_lapic_expire(vcpu);
|
||||
/*
|
||||
* Ensure the guest's timer has truly expired before posting an
|
||||
* interrupt. Open code the relevant checks to avoid querying
|
||||
* lapic_timer_int_injected(), which will be false since the
|
||||
* interrupt isn't yet injected. Waiting until after injecting
|
||||
* is not an option since that won't help a posted interrupt.
|
||||
*/
|
||||
if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
|
||||
vcpu->arch.apic->lapic_timer.timer_advance_ns)
|
||||
__kvm_wait_lapic_expire(vcpu);
|
||||
kvm_apic_inject_pending_timer_irqs(apic);
|
||||
return;
|
||||
}
|
||||
|
@ -2595,6 +2604,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
|
|||
|
||||
apic_update_ppr(apic);
|
||||
hrtimer_cancel(&apic->lapic_timer.timer);
|
||||
apic->lapic_timer.expired_tscdeadline = 0;
|
||||
apic_update_lvtt(apic);
|
||||
apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
|
||||
update_divide_count(apic);
|
||||
|
|
|
@ -337,7 +337,18 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
|
|||
cpu_relax();
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* If the SPTE is not MMU-present, there is no backing
|
||||
* page associated with the SPTE and so no side effects
|
||||
* that need to be recorded, and exclusive ownership of
|
||||
* mmu_lock ensures the SPTE can't be made present.
|
||||
* Note, zapping MMIO SPTEs is also unnecessary as they
|
||||
* are guarded by the memslots generation, not by being
|
||||
* unreachable.
|
||||
*/
|
||||
old_child_spte = READ_ONCE(*sptep);
|
||||
if (!is_shadow_present_pte(old_child_spte))
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Marking the SPTE as a removed SPTE is not
|
||||
|
|
|
@ -115,13 +115,6 @@ static const struct svm_direct_access_msrs {
|
|||
{ .index = MSR_INVALID, .always = false },
|
||||
};
|
||||
|
||||
/* enable NPT for AMD64 and X86 with PAE */
|
||||
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
|
||||
bool npt_enabled = true;
|
||||
#else
|
||||
bool npt_enabled;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
|
||||
* pause_filter_count: On processors that support Pause filtering(indicated
|
||||
|
@ -170,9 +163,12 @@ module_param(pause_filter_count_shrink, ushort, 0444);
|
|||
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
|
||||
module_param(pause_filter_count_max, ushort, 0444);
|
||||
|
||||
/* allow nested paging (virtualized MMU) for all guests */
|
||||
static int npt = true;
|
||||
module_param(npt, int, S_IRUGO);
|
||||
/*
|
||||
* Use nested page tables by default. Note, NPT may get forced off by
|
||||
* svm_hardware_setup() if it's unsupported by hardware or the host kernel.
|
||||
*/
|
||||
bool npt_enabled = true;
|
||||
module_param_named(npt, npt_enabled, bool, 0444);
|
||||
|
||||
/* allow nested virtualization in KVM/SVM */
|
||||
static int nested = true;
|
||||
|
@ -988,10 +984,15 @@ static __init int svm_hardware_setup(void)
|
|||
goto err;
|
||||
}
|
||||
|
||||
if (!boot_cpu_has(X86_FEATURE_NPT))
|
||||
/*
|
||||
* KVM's MMU doesn't support using 2-level paging for itself, and thus
|
||||
* NPT isn't supported if the host is using 2-level paging since host
|
||||
* CR4 is unchanged on VMRUN.
|
||||
*/
|
||||
if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
|
||||
npt_enabled = false;
|
||||
|
||||
if (npt_enabled && !npt)
|
||||
if (!boot_cpu_has(X86_FEATURE_NPT))
|
||||
npt_enabled = false;
|
||||
|
||||
kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
|
||||
|
|
|
@ -6580,8 +6580,8 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
|
|||
int i, nr_msrs;
|
||||
struct perf_guest_switch_msr *msrs;
|
||||
|
||||
/* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. */
|
||||
msrs = perf_guest_get_msrs(&nr_msrs);
|
||||
|
||||
if (!msrs)
|
||||
return;
|
||||
|
||||
|
|
|
@ -10601,7 +10601,7 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
|
|||
return (void __user *)hva;
|
||||
} else {
|
||||
if (!slot || !slot->npages)
|
||||
return 0;
|
||||
return NULL;
|
||||
|
||||
old_npages = slot->npages;
|
||||
hva = slot->userspace_addr;
|
||||
|
|
|
@ -1415,6 +1415,25 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
|
|||
}
|
||||
}
|
||||
|
||||
static unsigned long insn_get_effective_ip(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long seg_base = 0;
|
||||
|
||||
/*
|
||||
* If not in user-space long mode, a custom code segment could be in
|
||||
* use. This is true in protected mode (if the process defined a local
|
||||
* descriptor table), or virtual-8086 mode. In most of the cases
|
||||
* seg_base will be zero as in USER_CS.
|
||||
*/
|
||||
if (!user_64bit_mode(regs)) {
|
||||
seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
|
||||
if (seg_base == -1L)
|
||||
return 0;
|
||||
}
|
||||
|
||||
return seg_base + regs->ip;
|
||||
}
|
||||
|
||||
/**
|
||||
* insn_fetch_from_user() - Copy instruction bytes from user-space memory
|
||||
* @regs: Structure with register values as seen when entering kernel mode
|
||||
|
@ -1431,24 +1450,43 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
|
|||
*/
|
||||
int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
|
||||
{
|
||||
unsigned long seg_base = 0;
|
||||
unsigned long ip;
|
||||
int not_copied;
|
||||
|
||||
/*
|
||||
* If not in user-space long mode, a custom code segment could be in
|
||||
* use. This is true in protected mode (if the process defined a local
|
||||
* descriptor table), or virtual-8086 mode. In most of the cases
|
||||
* seg_base will be zero as in USER_CS.
|
||||
*/
|
||||
if (!user_64bit_mode(regs)) {
|
||||
seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
|
||||
if (seg_base == -1L)
|
||||
return 0;
|
||||
}
|
||||
ip = insn_get_effective_ip(regs);
|
||||
if (!ip)
|
||||
return 0;
|
||||
|
||||
not_copied = copy_from_user(buf, (void __user *)ip, MAX_INSN_SIZE);
|
||||
|
||||
not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip),
|
||||
MAX_INSN_SIZE);
|
||||
return MAX_INSN_SIZE - not_copied;
|
||||
}
|
||||
|
||||
/**
|
||||
* insn_fetch_from_user_inatomic() - Copy instruction bytes from user-space memory
|
||||
* while in atomic code
|
||||
* @regs: Structure with register values as seen when entering kernel mode
|
||||
* @buf: Array to store the fetched instruction
|
||||
*
|
||||
* Gets the linear address of the instruction and copies the instruction bytes
|
||||
* to the buf. This function must be used in atomic context.
|
||||
*
|
||||
* Returns:
|
||||
*
|
||||
* Number of instruction bytes copied.
|
||||
*
|
||||
* 0 if nothing was copied.
|
||||
*/
|
||||
int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
|
||||
{
|
||||
unsigned long ip;
|
||||
int not_copied;
|
||||
|
||||
ip = insn_get_effective_ip(regs);
|
||||
if (!ip)
|
||||
return 0;
|
||||
|
||||
not_copied = __copy_from_user_inatomic(buf, (void __user *)ip, MAX_INSN_SIZE);
|
||||
|
||||
return MAX_INSN_SIZE - not_copied;
|
||||
}
|
||||
|
|
|
@ -741,7 +741,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
|
|||
map_ops[i].status = GNTST_general_error;
|
||||
unmap[0].host_addr = map_ops[i].host_addr,
|
||||
unmap[0].handle = map_ops[i].handle;
|
||||
map_ops[i].handle = ~0;
|
||||
map_ops[i].handle = INVALID_GRANT_HANDLE;
|
||||
if (map_ops[i].flags & GNTMAP_device_map)
|
||||
unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
|
||||
else
|
||||
|
@ -751,7 +751,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
|
|||
kmap_ops[i].status = GNTST_general_error;
|
||||
unmap[1].host_addr = kmap_ops[i].host_addr,
|
||||
unmap[1].handle = kmap_ops[i].handle;
|
||||
kmap_ops[i].handle = ~0;
|
||||
kmap_ops[i].handle = INVALID_GRANT_HANDLE;
|
||||
if (kmap_ops[i].flags & GNTMAP_device_map)
|
||||
unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
|
||||
else
|
||||
|
@ -776,7 +776,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
|
|||
out:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
|
||||
|
||||
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
|
||||
struct gnttab_unmap_grant_ref *kunmap_ops,
|
||||
|
@ -802,7 +801,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
|
|||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
|
||||
|
||||
#ifdef CONFIG_XEN_DEBUG_FS
|
||||
#include <linux/debugfs.h>
|
||||
|
|
block/bio.c | 14

@@ -33,7 +33,7 @@ static struct biovec_slab {
 	{ .nr_vecs = 16, .name = "biovec-16" },
 	{ .nr_vecs = 64, .name = "biovec-64" },
 	{ .nr_vecs = 128, .name = "biovec-128" },
-	{ .nr_vecs = BIO_MAX_PAGES, .name = "biovec-max" },
+	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
 };

 static struct biovec_slab *biovec_slab(unsigned short nr_vecs)

@@ -46,7 +46,7 @@ static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
 		return &bvec_slabs[1];
 	case 65 ... 128:
 		return &bvec_slabs[2];
-	case 129 ... BIO_MAX_PAGES:
+	case 129 ... BIO_MAX_VECS:
 		return &bvec_slabs[3];
 	default:
 		BUG();

@@ -151,9 +151,9 @@ out:

 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
 {
-	BIO_BUG_ON(nr_vecs > BIO_MAX_PAGES);
+	BIO_BUG_ON(nr_vecs > BIO_MAX_VECS);

-	if (nr_vecs == BIO_MAX_PAGES)
+	if (nr_vecs == BIO_MAX_VECS)
 		mempool_free(bv, pool);
 	else if (nr_vecs > BIO_INLINE_VECS)
 		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);

@@ -186,15 +186,15 @@ struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
 	/*
 	 * Try a slab allocation first for all smaller allocations. If that
 	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
-	 * The mempool is sized to handle up to BIO_MAX_PAGES entries.
+	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
 	 */
-	if (*nr_vecs < BIO_MAX_PAGES) {
+	if (*nr_vecs < BIO_MAX_VECS) {
 		struct bio_vec *bvl;

 		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
 		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
 			return bvl;
-		*nr_vecs = BIO_MAX_PAGES;
+		*nr_vecs = BIO_MAX_VECS;
 	}

 	return mempool_alloc(pool, gfp_mask);
|
||||
|
|
|
@ -109,6 +109,7 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
|
|||
|
||||
lockdep_assert_held(&blkg->q->queue_lock);
|
||||
|
||||
memset(sum, 0, sizeof(*sum));
|
||||
rcu_read_lock();
|
||||
blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
|
||||
struct blkg_rwstat *rwstat;
|
||||
|
@ -122,7 +123,7 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
|
|||
rwstat = (void *)pos_blkg + off;
|
||||
|
||||
for (i = 0; i < BLKG_RWSTAT_NR; i++)
|
||||
sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
|
||||
sum->cnt[i] += blkg_rwstat_read_counter(rwstat, i);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
|
|
@ -219,7 +219,7 @@ static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
|
|||
|
||||
bio_for_each_segment(bv, bio, iter) {
|
||||
num_sectors += bv.bv_len >> SECTOR_SHIFT;
|
||||
if (++i == BIO_MAX_PAGES)
|
||||
if (++i == BIO_MAX_VECS)
|
||||
break;
|
||||
}
|
||||
if (num_sectors < bio_sectors(bio)) {
|
||||
|
|
|
@ -296,7 +296,7 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
|
|||
{
|
||||
sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);
|
||||
|
||||
return min(pages, (sector_t)BIO_MAX_PAGES);
|
||||
return min(pages, (sector_t)BIO_MAX_VECS);
|
||||
}
|
||||
|
||||
static int __blkdev_issue_zero_pages(struct block_device *bdev,
|
||||
|
|
|
@ -249,7 +249,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
|
|||
if (!iov_iter_count(iter))
|
||||
return -EINVAL;
|
||||
|
||||
bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
|
||||
bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_VECS));
|
||||
if (!bio)
|
||||
return -ENOMEM;
|
||||
bio->bi_opf |= req_op(rq);
|
||||
|
|
|
@ -240,7 +240,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
|
|||
*/
|
||||
if (op == REQ_OP_ZONE_RESET &&
|
||||
blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
|
||||
bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
|
||||
bio->bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -318,6 +318,22 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
|
||||
const struct blk_zone_range *zrange)
|
||||
{
|
||||
loff_t start, end;
|
||||
|
||||
if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
|
||||
zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
|
||||
/* Out of range */
|
||||
return -EINVAL;
|
||||
|
||||
start = zrange->sector << SECTOR_SHIFT;
|
||||
end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
|
||||
|
||||
return truncate_bdev_range(bdev, mode, start, end);
|
||||
}
|
||||
|
||||
/*
|
||||
* BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
|
||||
* Called from blkdev_ioctl.
|
||||
|
@ -329,6 +345,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
|
|||
struct request_queue *q;
|
||||
struct blk_zone_range zrange;
|
||||
enum req_opf op;
|
||||
int ret;
|
||||
|
||||
if (!argp)
|
||||
return -EINVAL;
|
||||
|
@ -352,6 +369,11 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
|
|||
switch (cmd) {
|
||||
case BLKRESETZONE:
|
||||
op = REQ_OP_ZONE_RESET;
|
||||
|
||||
/* Invalidate the page cache, including dirty pages. */
|
||||
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
case BLKOPENZONE:
|
||||
op = REQ_OP_ZONE_OPEN;
|
||||
|
@ -366,8 +388,20 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
|
|||
return -ENOTTY;
|
||||
}
|
||||
|
||||
return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
|
||||
GFP_KERNEL);
|
||||
ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
|
||||
GFP_KERNEL);
|
||||
|
||||
/*
|
||||
* Invalidate the page cache again for zone reset: writes can only be
|
||||
* direct for zoned devices so concurrent writes would not add any page
|
||||
* to the page cache after/during reset. The page cache may be filled
|
||||
* again due to concurrent reads though and dropping the pages for
|
||||
* these is fine.
|
||||
*/
|
||||
if (!ret && cmd == BLKRESETZONE)
|
||||
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline unsigned long *blk_alloc_zone_bitmap(int node,
|
||||
|
|
|
@ -229,10 +229,10 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
|
|||
* - The point of cloning the biovec is to produce a bio with a biovec
|
||||
* the caller can modify: bi_idx and bi_bvec_done should be 0.
|
||||
*
|
||||
* - The original bio could've had more than BIO_MAX_PAGES biovecs; if
|
||||
* - The original bio could've had more than BIO_MAX_VECS biovecs; if
|
||||
* we tried to clone the whole thing bio_alloc_bioset() would fail.
|
||||
* But the clone should succeed as long as the number of biovecs we
|
||||
* actually need to allocate is fewer than BIO_MAX_PAGES.
|
||||
* actually need to allocate is fewer than BIO_MAX_VECS.
|
||||
*
|
||||
* - Lastly, bi_vcnt should not be looked at or relied upon by code
|
||||
* that does not own the bio - reason being drivers don't use it for
|
||||
|
@ -299,7 +299,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
|
|||
int sectors = 0;
|
||||
|
||||
bio_for_each_segment(from, *bio_orig, iter) {
|
||||
if (i++ < BIO_MAX_PAGES)
|
||||
if (i++ < BIO_MAX_VECS)
|
||||
sectors += from.bv_len >> 9;
|
||||
if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
|
||||
bounce = true;
|
||||
|
|
|
@ -534,10 +534,8 @@ static void register_disk(struct device *parent, struct gendisk *disk,
|
|||
kobject_create_and_add("holders", &ddev->kobj);
|
||||
disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
|
||||
|
||||
if (disk->flags & GENHD_FL_HIDDEN) {
|
||||
dev_set_uevent_suppress(ddev, 0);
|
||||
if (disk->flags & GENHD_FL_HIDDEN)
|
||||
return;
|
||||
}
|
||||
|
||||
disk_scan_partitions(disk);
|
||||
|
||||
|
|
|
@ -938,6 +938,9 @@ int software_node_register(const struct software_node *node)
|
|||
if (software_node_to_swnode(node))
|
||||
return -EEXIST;
|
||||
|
||||
if (node->parent && !parent)
|
||||
return -EINVAL;
|
||||
|
||||
return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(software_node_register);
|
||||
|
@ -1002,25 +1005,33 @@ EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
|
|||
/**
|
||||
* device_add_software_node - Assign software node to a device
|
||||
* @dev: The device the software node is meant for.
|
||||
* @swnode: The software node.
|
||||
* @node: The software node.
|
||||
*
|
||||
* This function will register @swnode and make it the secondary firmware node
|
||||
* pointer of @dev. If @dev has no primary node, then @swnode will become the primary
|
||||
* node.
|
||||
* This function will make @node the secondary firmware node pointer of @dev. If
|
||||
* @dev has no primary node, then @node will become the primary node. The
|
||||
* function will register @node automatically if it wasn't already registered.
|
||||
*/
|
||||
int device_add_software_node(struct device *dev, const struct software_node *swnode)
|
||||
int device_add_software_node(struct device *dev, const struct software_node *node)
|
||||
{
|
||||
struct swnode *swnode;
|
||||
int ret;
|
||||
|
||||
/* Only one software node per device. */
|
||||
if (dev_to_swnode(dev))
|
||||
return -EBUSY;
|
||||
|
||||
ret = software_node_register(swnode);
|
||||
if (ret)
|
||||
return ret;
|
||||
swnode = software_node_to_swnode(node);
|
||||
if (swnode) {
|
||||
kobject_get(&swnode->kobj);
|
||||
} else {
|
||||
ret = software_node_register(node);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
set_secondary_fwnode(dev, software_node_fwnode(swnode));
|
||||
swnode = software_node_to_swnode(node);
|
||||
}
|
||||
|
||||
set_secondary_fwnode(dev, &swnode->fwnode);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1324,7 +1324,7 @@ struct bm_extent {
|
|||
* A followup commit may allow even bigger BIO sizes,
|
||||
* once we thought that through. */
|
||||
#define DRBD_MAX_BIO_SIZE (1U << 20)
|
||||
#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
|
||||
#if DRBD_MAX_BIO_SIZE > (BIO_MAX_VECS << PAGE_SHIFT)
|
||||
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
|
||||
#endif
|
||||
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
|
||||
|
|
|
@ -871,6 +871,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
|
|||
card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
|
||||
if (!card->event_wq) {
|
||||
dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
|
||||
st = -ENOMEM;
|
||||
goto failed_event_handler;
|
||||
}
|
||||
|
||||
|
|
|
@ -877,6 +877,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
|
|||
if (card->mm_pages[0].desc == NULL ||
|
||||
card->mm_pages[1].desc == NULL) {
|
||||
dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
|
||||
ret = -ENOMEM;
|
||||
goto failed_alloc;
|
||||
}
|
||||
reset_page(&card->mm_pages[0]);
|
||||
|
@ -888,8 +889,10 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
|
|||
spin_lock_init(&card->lock);
|
||||
|
||||
card->queue = blk_alloc_queue(NUMA_NO_NODE);
|
||||
if (!card->queue)
|
||||
if (!card->queue) {
|
||||
ret = -ENOMEM;
|
||||
goto failed_alloc;
|
||||
}
|
||||
|
||||
tasklet_init(&card->tasklet, process_page, (unsigned long)card);
|
||||
|
||||
|
|
|
@ -627,7 +627,7 @@ static ssize_t writeback_store(struct device *dev,
|
|||
struct bio_vec bio_vec;
|
||||
struct page *page;
|
||||
ssize_t ret = len;
|
||||
int mode;
|
||||
int mode, err;
|
||||
unsigned long blk_idx = 0;
|
||||
|
||||
if (sysfs_streq(buf, "idle"))
|
||||
|
@ -638,8 +638,8 @@ static ssize_t writeback_store(struct device *dev,
|
|||
if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
|
||||
return -EINVAL;
|
||||
|
||||
ret = kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index);
|
||||
if (ret || index >= nr_pages)
|
||||
if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
|
||||
index >= nr_pages)
|
||||
return -EINVAL;
|
||||
|
||||
nr_pages = 1;
|
||||
|
@ -663,7 +663,7 @@ static ssize_t writeback_store(struct device *dev,
|
|||
goto release_init_lock;
|
||||
}
|
||||
|
||||
while (nr_pages--) {
|
||||
for (; nr_pages != 0; index++, nr_pages--) {
|
||||
struct bio_vec bvec;
|
||||
|
||||
bvec.bv_page = page;
|
||||
|
@ -728,12 +728,17 @@ static ssize_t writeback_store(struct device *dev,
|
|||
* XXX: A single page IO would be inefficient for write
|
||||
* but it would be not bad as starter.
|
||||
*/
|
||||
ret = submit_bio_wait(&bio);
|
||||
if (ret) {
|
||||
err = submit_bio_wait(&bio);
|
||||
if (err) {
|
||||
zram_slot_lock(zram, index);
|
||||
zram_clear_flag(zram, index, ZRAM_UNDER_WB);
|
||||
zram_clear_flag(zram, index, ZRAM_IDLE);
|
||||
zram_slot_unlock(zram, index);
|
||||
/*
|
||||
* Return last IO error unless every IO were
|
||||
* not suceeded.
|
||||
*/
|
||||
ret = err;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
|
@ -103,6 +103,8 @@ static const struct of_device_id whitelist[] __initconst = {
|
|||
static const struct of_device_id blacklist[] __initconst = {
|
||||
{ .compatible = "allwinner,sun50i-h6", },
|
||||
|
||||
{ .compatible = "arm,vexpress", },
|
||||
|
||||
{ .compatible = "calxeda,highbank", },
|
||||
{ .compatible = "calxeda,ecx-2000", },
|
||||
|
||||
|
|
|
@ -317,9 +317,9 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
|
|||
}
|
||||
|
||||
base = ioremap(res->start, resource_size(res));
|
||||
if (IS_ERR(base)) {
|
||||
if (!base) {
|
||||
dev_err(dev, "failed to map resource %pR\n", res);
|
||||
ret = PTR_ERR(base);
|
||||
ret = -ENOMEM;
|
||||
goto release_region;
|
||||
}
|
||||
|
||||
|
@ -374,7 +374,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
|
|||
error:
|
||||
kfree(data);
|
||||
unmap_base:
|
||||
iounmap(data->base);
|
||||
iounmap(base);
|
||||
release_region:
|
||||
release_mem_region(res->start, resource_size(res));
|
||||
return ret;
|
||||
|
|
|
@ -24,7 +24,7 @@ efi_status_t check_platform_features(void)
|
|||
return EFI_SUCCESS;
|
||||
|
||||
tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
|
||||
if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
|
||||
if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) {
|
||||
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
|
||||
efi_err("This 64 KB granular kernel is not supported by your CPU\n");
|
||||
else
|
||||
|
|
|
@ -96,6 +96,18 @@ static void install_memreserve_table(void)
|
|||
efi_err("Failed to install memreserve config table!\n");
|
||||
}
|
||||
|
||||
static u32 get_supported_rt_services(void)
|
||||
{
|
||||
const efi_rt_properties_table_t *rt_prop_table;
|
||||
u32 supported = EFI_RT_SUPPORTED_ALL;
|
||||
|
||||
rt_prop_table = get_efi_config_table(EFI_RT_PROPERTIES_TABLE_GUID);
|
||||
if (rt_prop_table)
|
||||
supported &= rt_prop_table->runtime_services_supported;
|
||||
|
||||
return supported;
|
||||
}
|
||||
|
||||
/*
|
||||
* EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint
|
||||
* that is described in the PE/COFF header. Most of the code is the same
|
||||
|
@ -250,6 +262,10 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
|
|||
(prop_tbl->memory_protection_attribute &
|
||||
EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA);
|
||||
|
||||
/* force efi_novamap if SetVirtualAddressMap() is unsupported */
|
||||
efi_novamap |= !(get_supported_rt_services() &
|
||||
EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP);
|
||||
|
||||
/* hibernation expects the runtime regions to stay in the same place */
|
||||
if (!IS_ENABLED(CONFIG_HIBERNATION) && !efi_nokaslr && !flat_va_mapping) {
|
||||
/*
|
||||
|
|
|
@ -228,6 +228,7 @@ source "drivers/gpu/drm/arm/Kconfig"
|
|||
config DRM_RADEON
|
||||
tristate "ATI Radeon"
|
||||
depends on DRM && PCI && MMU
|
||||
depends on AGP || !AGP
|
||||
select FW_LOADER
|
||||
select DRM_KMS_HELPER
|
||||
select DRM_TTM
|
||||
|
|
|
@ -180,6 +180,7 @@ extern uint amdgpu_smu_memory_pool_size;
|
|||
extern uint amdgpu_dc_feature_mask;
|
||||
extern uint amdgpu_dc_debug_mask;
|
||||
extern uint amdgpu_dm_abm_level;
|
||||
extern int amdgpu_backlight;
|
||||
extern struct amdgpu_mgpu_info mgpu_info;
|
||||
extern int amdgpu_ras_enable;
|
||||
extern uint amdgpu_ras_mask;
|
||||
|
|
|
@ -903,7 +903,7 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev)
|
|||
*/
|
||||
bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
|
||||
{
|
||||
#if defined(CONFIG_AMD_PMC)
|
||||
#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE)
|
||||
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
|
||||
if (adev->flags & AMD_IS_APU)
|
||||
return true;
|
||||
|
|
|
@ -781,6 +781,10 @@ uint amdgpu_dm_abm_level;
|
|||
MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
|
||||
module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
|
||||
|
||||
int amdgpu_backlight = -1;
|
||||
MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
|
||||
module_param_named(backlight, amdgpu_backlight, bint, 0444);
|
||||
|
||||
/**
|
||||
* DOC: tmz (int)
|
||||
* Trusted Memory Zone (TMZ) is a method to protect data being written
|
||||
|
|