Linux 4.11-rc6

-----BEGIN PGP SIGNATURE-----
iQEcBAABAgAGBQJY6mY1AAoJEHm+PkMAQRiGB14IAImsH28JPjxJVDasMIRPBxVc
euPPlZgoBieu7sNt+kEsEqdkXuu0MLk6gln0IGxWLeoB2S+u3Tz5LMa2YArVqV9Z
tWzOnI9auE73P2Pz/tUMOdyMs5tO0PolQxX3uljbULBozOHjHRh13fsXchX2yQvl
mFeFCDqpPV0KhWRH/ciA8uIHdvYPhMpkKgRtmR8jXL0yzqLp6+2J+Bs8nHG4NNng
HMVxZPC8jOE/TgWq6k/GmXgxh3H/AideFdHFbLKYnIFJW41ZGOI8a262zq3NmjPd
lywpVU7O7RMhSITY5PnuR3LpNV8ftw1hz2y6t35unyFK1P02adOSj5GJ3hGdhaQ=
=Xz5O
-----END PGP SIGNATURE-----

Backmerge tag 'v4.11-rc6' into drm-next

Linux 4.11-rc6

drm-misc needs 4.11-rc5, may as well fix conflicts with rc6.
Commit b769fefb68
.mailmap

@@ -171,6 +171,7 @@ Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
Yusuke Goda <goda.yusuke@renesas.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>
@@ -1725,6 +1725,12 @@
			kernel and module base offset ASLR (Address Space
			Layout Randomization).

	kasan_multi_shot
			[KNL] Enforce KASAN (Kernel Address Sanitizer) to print
			report on every invalid memory access. Without this
			parameter KASAN will print report only for the first
			invalid access.

	keepinitrd	[HW,ARM]

	kernelcore=	[KNL,X86,IA-64,PPC]
@@ -12,7 +12,8 @@ Required properties:
- reg : Offset and length of the register set for the module
- interrupts : the interrupt number for the RNG module.
	Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76"
- clocks: the trng clock source
- clocks: the trng clock source. Only mandatory for the
	"inside-secure,safexcel-eip76" compatible.

Example:
/* AM335x */
@@ -58,8 +58,7 @@ prototypes:
	int (*permission) (struct inode *, int, unsigned int);
	int (*get_acl)(struct inode *, int);
	int (*setattr) (struct dentry *, struct iattr *);
	int (*getattr) (const struct path *, struct dentry *, struct kstat *,
			u32, unsigned int);
	int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
	ssize_t (*listxattr) (struct dentry *, char *, size_t);
	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
	void (*update_time)(struct inode *, struct timespec *, int);
@@ -600,3 +600,9 @@ in your dentry operations instead.
[recommended]
	->readlink is optional for symlinks. Don't set, unless filesystem needs
	to fake something for readlink(2).
--
[mandatory]
	->getattr() is now passed a struct path rather than a vfsmount and
	dentry separately, and it now has request_mask and query_flags arguments
	to specify the fields and sync type requested by statx. Filesystems not
	supporting any statx-specific features may ignore the new arguments.
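Aside (not part of this diff): under the new calling convention described in the porting note above, a minimal ->getattr() might look like the sketch below; foo_getattr is a hypothetical name, and the body simply ignores request_mask and query_flags, which the note says is acceptable for filesystems without statx-specific features.

	/* Hypothetical sketch, not from this commit: ->getattr() with the
	 * post-statx signature described in the porting note above. */
	#include <linux/fs.h>
	#include <linux/stat.h>

	static int foo_getattr(const struct path *path, struct kstat *stat,
			       u32 request_mask, unsigned int query_flags)
	{
		struct inode *inode = d_inode(path->dentry);

		/* No statx-specific features: request_mask/query_flags ignored. */
		generic_fillattr(inode, stat);
		return 0;
	}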
@@ -382,8 +382,7 @@ struct inode_operations {
	int (*permission) (struct inode *, int);
	int (*get_acl)(struct inode *, int);
	int (*setattr) (struct dentry *, struct iattr *);
	int (*getattr) (const struct path *, struct dentry *, struct kstat *,
			u32, unsigned int);
	int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
	ssize_t (*listxattr) (struct dentry *, char *, size_t);
	void (*update_time)(struct inode *, struct timespec *, int);
	int (*atomic_open)(struct inode *, struct dentry *, struct file *,
@@ -77,9 +77,15 @@ static struct pinctrl_desc foo_desc = {

int __init foo_probe(void)
{
	int error;

	struct pinctrl_dev *pctl;

	return pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
	error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
	if (error)
		return error;

	return pinctrl_enable(pctl);
}

To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and
@@ -124,7 +124,7 @@ specified in the following format in the sign-off area:

.. code-block:: none

     Cc: <stable@vger.kernel.org> # 3.3.x-
     Cc: <stable@vger.kernel.org> # 3.3.x

The tag has the meaning of:
@@ -3377,6 +3377,69 @@ struct kvm_ppc_resize_hpt {
	__u32 pad;
};

4.104 KVM_X86_GET_MCE_CAP_SUPPORTED

Capability: KVM_CAP_MCE
Architectures: x86
Type: system ioctl
Parameters: u64 mce_cap (out)
Returns: 0 on success, -1 on error

Returns supported MCE capabilities. The u64 mce_cap parameter
has the same format as the MSR_IA32_MCG_CAP register. Supported
capabilities will have the corresponding bits set.

4.105 KVM_X86_SETUP_MCE

Capability: KVM_CAP_MCE
Architectures: x86
Type: vcpu ioctl
Parameters: u64 mcg_cap (in)
Returns: 0 on success,
	-EFAULT if u64 mcg_cap cannot be read,
	-EINVAL if the requested number of banks is invalid,
	-EINVAL if requested MCE capability is not supported.

Initializes MCE support for use. The u64 mcg_cap parameter
has the same format as the MSR_IA32_MCG_CAP register and
specifies which capabilities should be enabled. The maximum
supported number of error-reporting banks can be retrieved when
checking for KVM_CAP_MCE. The supported capabilities can be
retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED.

4.106 KVM_X86_SET_MCE

Capability: KVM_CAP_MCE
Architectures: x86
Type: vcpu ioctl
Parameters: struct kvm_x86_mce (in)
Returns: 0 on success,
	-EFAULT if struct kvm_x86_mce cannot be read,
	-EINVAL if the bank number is invalid,
	-EINVAL if VAL bit is not set in status field.

Inject a machine check error (MCE) into the guest. The input
parameter is:

struct kvm_x86_mce {
	__u64 status;
	__u64 addr;
	__u64 misc;
	__u64 mcg_status;
	__u8 bank;
	__u8 pad1[7];
	__u64 pad2[3];
};

If the MCE being reported is an uncorrected error, KVM will
inject it as an MCE exception into the guest. If the guest
MCG_STATUS register reports that an MCE is in progress, KVM
causes an KVM_EXIT_SHUTDOWN vmexit.

Otherwise, if the MCE is a corrected error, KVM will just
store it in the corresponding bank (provided this bank is
not holding a previously reported uncorrected error).

5. The kvm_run structure
------------------------
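Aside (not part of the diff): a userspace VMM could exercise the two setup ioctls documented above roughly as in the sketch below; the helper name setup_mce() and the minimal error handling are assumptions for illustration only.

	/* Illustrative sketch, not from this commit.  Queries the host's
	 * supported MCE capabilities on the /dev/kvm fd and enables them on a
	 * vcpu, per KVM_X86_GET_MCE_CAP_SUPPORTED / KVM_X86_SETUP_MCE above. */
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int setup_mce(int kvm_fd, int vcpu_fd)
	{
		uint64_t mcg_cap;

		/* system ioctl: u64 mce_cap (out), MSR_IA32_MCG_CAP format */
		if (ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mcg_cap) < 0)
			return -1;

		/* vcpu ioctl: u64 mcg_cap (in), enable what the host reports */
		return ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap);
	}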
@@ -83,6 +83,12 @@ Groups:

  Bits for undefined preemption levels are RAZ/WI.

  For historical reasons and to provide ABI compatibility with userspace we
  export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
  field in the lower 5 bits of a word, meaning that userspace must always
  use the lower 5 bits to communicate with the KVM device and must shift the
  value left by 3 places to obtain the actual priority mask level.

  Limitations:
  - Priorities are not implemented, and registers are RAZ/WI
  - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
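Aside (not part of the diff): the shift-by-3 convention described above can be captured in a pair of trivial helpers; these are hypothetical illustrations, not code from the tree.

	/* Hypothetical helpers: convert between the 5-bit VMPriMask encoding
	 * exported by the KVM device and the architectural GICC_PMR value. */
	#include <stdint.h>

	static inline uint32_t kvm_pmr_to_gicc_pmr(uint32_t kvm_val)
	{
		return (kvm_val & 0x1f) << 3;	/* low 5 bits -> mask level */
	}

	static inline uint32_t gicc_pmr_to_kvm_pmr(uint32_t pmr)
	{
		return (pmr >> 3) & 0x1f;	/* mask level -> low 5 bits */
	}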
MAINTAINERS

@@ -4117,14 +4117,13 @@ F: drivers/block/drbd/
F: lib/lru_cache.c
F: Documentation/blockdev/drbd/

DRIVER CORE, KOBJECTS, DEBUGFS, KERNFS AND SYSFS
DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
S: Supported
F: Documentation/kobject.txt
F: drivers/base/
F: fs/debugfs/
F: fs/kernfs/
F: fs/sysfs/
F: include/linux/debugfs.h
F: include/linux/kobj*
@@ -4784,6 +4783,12 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/mpc85xx_edac.[ch]

EDAC-PND2
M: Tony Luck <tony.luck@intel.com>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/pnd2_edac.[ch]

EDAC-PASEMI
M: Egor Martovetsky <egor@pasemi.com>
L: linux-edac@vger.kernel.org
@@ -4931,6 +4936,7 @@ F: include/linux/netfilter_bridge/
F: net/bridge/

ETHERNET PHY LIBRARY
M: Andrew Lunn <andrew@lunn.ch>
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
@@ -7092,9 +7098,9 @@ S: Maintained
F: fs/autofs4/

KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
M: Masahiro Yamada <yamada.masahiro@socionext.com>
M: Michal Marek <mmarek@suse.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
L: linux-kbuild@vger.kernel.org
S: Maintained
F: Documentation/kbuild/
@@ -7211,6 +7217,14 @@ F: arch/mips/include/uapi/asm/kvm*
F: arch/mips/include/asm/kvm*
F: arch/mips/kvm/

KERNFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Tejun Heo <tj@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
S: Supported
F: include/linux/kernfs.h
F: fs/kernfs/

KEXEC
M: Eric Biederman <ebiederm@xmission.com>
W: http://kernel.org/pub/linux/utils/kernel/kexec/
@@ -10825,6 +10839,7 @@ F: drivers/s390/block/dasd*
F: block/partitions/ibm.c

S390 NETWORK DRIVERS
M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
M: Ursula Braun <ubraun@linux.vnet.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -10855,6 +10870,7 @@ S: Supported
F: drivers/s390/scsi/zfcp_*

S390 IUCV NETWORK LAYER
M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
M: Ursula Braun <ubraun@linux.vnet.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
Makefile

@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 11
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc6
NAME = Fearless Coyote

# *DOCUMENTATION*
@@ -372,7 +372,7 @@ LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)

@@ -653,6 +653,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)

# check for 'asm goto'
ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif

include scripts/Makefile.gcc-plugins

ifdef CONFIG_READABLE_ASM
@@ -798,12 +804,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)

# check for 'asm goto'
ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif

include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
include scripts/Makefile.ubsan
@@ -1290,7 +1290,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
	/* copy relevant bits of struct timex. */
	if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
	    copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
			   offsetof(struct timex32, time)))
			   offsetof(struct timex32, tick)))
		return -EFAULT;

	ret = do_adjtimex(&txc);
@ -26,6 +26,7 @@
|
|||
device_type = "cpu";
|
||||
compatible = "snps,arc770d";
|
||||
reg = <0>;
|
||||
clocks = <&core_clk>;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
device_type = "cpu";
|
||||
compatible = "snps,archs38";
|
||||
reg = <0>;
|
||||
clocks = <&core_clk>;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -19,8 +19,27 @@
|
|||
|
||||
cpu@0 {
|
||||
device_type = "cpu";
|
||||
compatible = "snps,archs38xN";
|
||||
compatible = "snps,archs38";
|
||||
reg = <0>;
|
||||
clocks = <&core_clk>;
|
||||
};
|
||||
cpu@1 {
|
||||
device_type = "cpu";
|
||||
compatible = "snps,archs38";
|
||||
reg = <1>;
|
||||
clocks = <&core_clk>;
|
||||
};
|
||||
cpu@2 {
|
||||
device_type = "cpu";
|
||||
compatible = "snps,archs38";
|
||||
reg = <2>;
|
||||
clocks = <&core_clk>;
|
||||
};
|
||||
cpu@3 {
|
||||
device_type = "cpu";
|
||||
compatible = "snps,archs38";
|
||||
reg = <3>;
|
||||
clocks = <&core_clk>;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -112,13 +112,19 @@
|
|||
interrupts = <7>;
|
||||
bus-width = <4>;
|
||||
};
|
||||
};
|
||||
|
||||
/* Embedded Vision subsystem UIO mappings; only relevant for EV VDK */
|
||||
/*
|
||||
* Embedded Vision subsystem UIO mappings; only relevant for EV VDK
|
||||
*
|
||||
* This node is intentionally put outside of MB above becase
|
||||
* it maps areas outside of MB's 0xEz-0xFz.
|
||||
*/
|
||||
uio_ev: uio@0xD0000000 {
|
||||
compatible = "generic-uio";
|
||||
reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
|
||||
reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
|
||||
interrupt-parent = <&mb_intc>;
|
||||
interrupts = <23>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -54,9 +54,7 @@ int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
|
|||
void kretprobe_trampoline(void);
|
||||
void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
|
||||
#else
|
||||
static void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
|
||||
{
|
||||
}
|
||||
#define trap_is_kprobe(address, regs)
|
||||
#endif /* CONFIG_KPROBES */
|
||||
|
||||
#endif /* _ARC_KPROBES_H */
|
||||
|
|
|
@ -100,15 +100,21 @@ END(handle_interrupt)
|
|||
;################### Non TLB Exception Handling #############################
|
||||
|
||||
ENTRY(EV_SWI)
|
||||
flag 1
|
||||
; TODO: implement this
|
||||
EXCEPTION_PROLOGUE
|
||||
b ret_from_exception
|
||||
END(EV_SWI)
|
||||
|
||||
ENTRY(EV_DivZero)
|
||||
flag 1
|
||||
; TODO: implement this
|
||||
EXCEPTION_PROLOGUE
|
||||
b ret_from_exception
|
||||
END(EV_DivZero)
|
||||
|
||||
ENTRY(EV_DCError)
|
||||
flag 1
|
||||
; TODO: implement this
|
||||
EXCEPTION_PROLOGUE
|
||||
b ret_from_exception
|
||||
END(EV_DCError)
|
||||
|
||||
; ---------------------------------------------
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <linux/fs.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/root_dev.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/clk-provider.h>
|
||||
#include <linux/clocksource.h>
|
||||
#include <linux/console.h>
|
||||
|
@ -488,8 +489,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
|||
{
|
||||
char *str;
|
||||
int cpu_id = ptr_to_cpu(v);
|
||||
struct device_node *core_clk = of_find_node_by_name(NULL, "core_clk");
|
||||
u32 freq = 0;
|
||||
struct device *cpu_dev = get_cpu_device(cpu_id);
|
||||
struct clk *cpu_clk;
|
||||
unsigned long freq = 0;
|
||||
|
||||
if (!cpu_online(cpu_id)) {
|
||||
seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
|
||||
|
@ -502,9 +504,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
|||
|
||||
seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
|
||||
|
||||
of_property_read_u32(core_clk, "clock-frequency", &freq);
|
||||
cpu_clk = clk_get(cpu_dev, NULL);
|
||||
if (IS_ERR(cpu_clk)) {
|
||||
seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
|
||||
cpu_id);
|
||||
} else {
|
||||
freq = clk_get_rate(cpu_clk);
|
||||
}
|
||||
if (freq)
|
||||
seq_printf(m, "CPU speed\t: %u.%02u Mhz\n",
|
||||
seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
|
||||
freq / 1000000, (freq / 10000) % 100);
|
||||
|
||||
seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
|
||||
|
|
|
@ -633,6 +633,9 @@ noinline static void slc_entire_op(const int op)
|
|||
|
||||
write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
|
||||
|
||||
/* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
|
||||
read_aux_reg(r);
|
||||
|
||||
/* Important to wait for flush to complete */
|
||||
while (read_aux_reg(r) & SLC_CTRL_BUSY);
|
||||
}
|
||||
|
|
|
@ -113,8 +113,8 @@
|
|||
simple-audio-card,mclk-fs = <512>;
|
||||
simple-audio-card,aux-devs = <&codec_analog>;
|
||||
simple-audio-card,routing =
|
||||
"Left DAC", "Digital Left DAC",
|
||||
"Right DAC", "Digital Right DAC";
|
||||
"Left DAC", "AIF1 Slot 0 Left",
|
||||
"Right DAC", "AIF1 Slot 0 Right";
|
||||
status = "disabled";
|
||||
|
||||
simple-audio-card,cpu {
|
||||
|
|
|
@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void)
|
|||
if (__hyp_get_vectors() == hyp_default_vectors)
|
||||
cpu_init_hyp_mode(NULL);
|
||||
}
|
||||
|
||||
if (vgic_present)
|
||||
kvm_vgic_init_cpu_hardware();
|
||||
}
|
||||
|
||||
static void cpu_hyp_reset(void)
|
||||
|
|
|
@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
|
|||
phys_addr_t addr = start, end = start + size;
|
||||
phys_addr_t next;
|
||||
|
||||
assert_spin_locked(&kvm->mmu_lock);
|
||||
pgd = kvm->arch.pgd + stage2_pgd_index(addr);
|
||||
do {
|
||||
next = stage2_pgd_addr_end(addr, end);
|
||||
if (!stage2_pgd_none(*pgd))
|
||||
unmap_stage2_puds(kvm, pgd, addr, next);
|
||||
/*
|
||||
* If the range is too large, release the kvm->mmu_lock
|
||||
* to prevent starvation and lockup detector warnings.
|
||||
*/
|
||||
if (next != end)
|
||||
cond_resched_lock(&kvm->mmu_lock);
|
||||
} while (pgd++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
|
@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
|
|||
int idx;
|
||||
|
||||
idx = srcu_read_lock(&kvm->srcu);
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
|
||||
slots = kvm_memslots(kvm);
|
||||
|
@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
|
|||
stage2_unmap_memslot(kvm, memslot);
|
||||
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
srcu_read_unlock(&kvm->srcu, idx);
|
||||
}
|
||||
|
||||
|
@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
|
|||
if (kvm->arch.pgd == NULL)
|
||||
return;
|
||||
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
|
||||
/* Free the HW pgd, one page at a time */
|
||||
free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
|
||||
kvm->arch.pgd = NULL;
|
||||
|
@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|||
(KVM_PHYS_SIZE >> PAGE_SHIFT))
|
||||
return -EFAULT;
|
||||
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
/*
|
||||
* A memory region could potentially cover multiple VMAs, and any holes
|
||||
* between them, so iterate over all of them to find out if we can map
|
||||
|
@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|||
pa += vm_start - vma->vm_start;
|
||||
|
||||
/* IO region dirty page logging not allowed */
|
||||
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
|
||||
return -EINVAL;
|
||||
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
|
||||
vm_end - vm_start,
|
||||
|
@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|||
} while (hva < reg_end);
|
||||
|
||||
if (change == KVM_MR_FLAGS_ONLY)
|
||||
return ret;
|
||||
goto out;
|
||||
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
if (ret)
|
||||
|
@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|||
else
|
||||
stage2_flush_memslot(kvm, memslot);
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
out:
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
|
|||
__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
|
||||
* that the intention is to allow exporting memory allocated via the
|
||||
* coherent DMA APIs through the dma_buf API, which only accepts a
|
||||
* scattertable. This presents a couple of problems:
|
||||
* 1. Not all memory allocated via the coherent DMA APIs is backed by
|
||||
* a struct page
|
||||
* 2. Passing coherent DMA memory into the streaming APIs is not allowed
|
||||
* as we will try to flush the memory through a different alias to that
|
||||
* actually being used (and the flushes are redundant.)
|
||||
*/
|
||||
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
|
||||
void *cpu_addr, dma_addr_t handle, size_t size,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
|
||||
unsigned long pfn = dma_to_pfn(dev, handle);
|
||||
struct page *page;
|
||||
int ret;
|
||||
|
||||
/* If the PFN is not valid, we do not have a struct page */
|
||||
if (!pfn_valid(pfn))
|
||||
return -ENXIO;
|
||||
|
||||
page = pfn_to_page(pfn);
|
||||
|
||||
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
|
|
|
@ -303,7 +303,10 @@ static inline void set_vbar(unsigned long val)
|
|||
*/
|
||||
static inline bool security_extensions_enabled(void)
|
||||
{
|
||||
/* Check CPUID Identification Scheme before ID_PFR1 read */
|
||||
if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
|
||||
return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned long __init setup_vectors_base(void)
|
||||
|
|
|
@ -266,11 +266,20 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
|
|||
#endif
|
||||
|
||||
if (p) {
|
||||
if (cur) {
|
||||
if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
|
||||
/*
|
||||
* Probe hit but conditional execution check failed,
|
||||
* so just skip the instruction and continue as if
|
||||
* nothing had happened.
|
||||
* In this case, we can skip recursing check too.
|
||||
*/
|
||||
singlestep_skip(p, regs);
|
||||
} else if (cur) {
|
||||
/* Kprobe is pending, so we're recursing. */
|
||||
switch (kcb->kprobe_status) {
|
||||
case KPROBE_HIT_ACTIVE:
|
||||
case KPROBE_HIT_SSDONE:
|
||||
case KPROBE_HIT_SS:
|
||||
/* A pre- or post-handler probe got us here. */
|
||||
kprobes_inc_nmissed_count(p);
|
||||
save_previous_kprobe(kcb);
|
||||
|
@ -279,11 +288,16 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
|
|||
singlestep(p, regs, kcb);
|
||||
restore_previous_kprobe(kcb);
|
||||
break;
|
||||
case KPROBE_REENTER:
|
||||
/* A nested probe was hit in FIQ, it is a BUG */
|
||||
pr_warn("Unrecoverable kprobe detected at %p.\n",
|
||||
p->addr);
|
||||
/* fall through */
|
||||
default:
|
||||
/* impossible cases */
|
||||
BUG();
|
||||
}
|
||||
} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
|
||||
} else {
|
||||
/* Probe hit and conditional execution check ok. */
|
||||
set_current_kprobe(p);
|
||||
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
|
||||
|
@ -304,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
|
|||
}
|
||||
reset_current_kprobe();
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* Probe hit but conditional execution check failed,
|
||||
* so just skip the instruction and continue as if
|
||||
* nothing had happened.
|
||||
*/
|
||||
singlestep_skip(p, regs);
|
||||
}
|
||||
} else if (cur) {
|
||||
/* We probably hit a jprobe. Call its break handler. */
|
||||
|
@ -434,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
|
|||
struct hlist_node *tmp;
|
||||
unsigned long flags, orig_ret_address = 0;
|
||||
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
|
||||
kprobe_opcode_t *correct_ret_addr = NULL;
|
||||
|
||||
INIT_HLIST_HEAD(&empty_rp);
|
||||
kretprobe_hash_lock(current, &head, &flags);
|
||||
|
@ -456,15 +464,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
|
|||
/* another task is sharing our hash bucket */
|
||||
continue;
|
||||
|
||||
if (ri->rp && ri->rp->handler) {
|
||||
__this_cpu_write(current_kprobe, &ri->rp->kp);
|
||||
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
|
||||
ri->rp->handler(ri, regs);
|
||||
__this_cpu_write(current_kprobe, NULL);
|
||||
}
|
||||
|
||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||
recycle_rp_inst(ri, &empty_rp);
|
||||
|
||||
if (orig_ret_address != trampoline_address)
|
||||
/*
|
||||
|
@ -476,6 +476,33 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
|
|||
}
|
||||
|
||||
kretprobe_assert(ri, orig_ret_address, trampoline_address);
|
||||
|
||||
correct_ret_addr = ri->ret_addr;
|
||||
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
|
||||
if (ri->task != current)
|
||||
/* another task is sharing our hash bucket */
|
||||
continue;
|
||||
|
||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||
if (ri->rp && ri->rp->handler) {
|
||||
__this_cpu_write(current_kprobe, &ri->rp->kp);
|
||||
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
|
||||
ri->ret_addr = correct_ret_addr;
|
||||
ri->rp->handler(ri, regs);
|
||||
__this_cpu_write(current_kprobe, NULL);
|
||||
}
|
||||
|
||||
recycle_rp_inst(ri, &empty_rp);
|
||||
|
||||
if (orig_ret_address != trampoline_address)
|
||||
/*
|
||||
* This is the real return address. Any other
|
||||
* instances associated with this task are for
|
||||
* other calls deeper on the call stack
|
||||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
kretprobe_hash_unlock(current, &flags);
|
||||
|
||||
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
|
||||
|
|
|
@ -977,7 +977,10 @@ static void coverage_end(void)
|
|||
void __naked __kprobes_test_case_start(void)
|
||||
{
|
||||
__asm__ __volatile__ (
|
||||
"stmdb sp!, {r4-r11} \n\t"
|
||||
"mov r2, sp \n\t"
|
||||
"bic r3, r2, #7 \n\t"
|
||||
"mov sp, r3 \n\t"
|
||||
"stmdb sp!, {r2-r11} \n\t"
|
||||
"sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
|
||||
"bic r0, lr, #1 @ r0 = inline data \n\t"
|
||||
"mov r1, sp \n\t"
|
||||
|
@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void)
|
|||
"movne pc, r0 \n\t"
|
||||
"mov r0, r4 \n\t"
|
||||
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
|
||||
"ldmia sp!, {r4-r11} \n\t"
|
||||
"ldmia sp!, {r2-r11} \n\t"
|
||||
"mov sp, r2 \n\t"
|
||||
"mov pc, r0 \n\t"
|
||||
);
|
||||
}
|
||||
|
@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void)
|
|||
"bxne r0 \n\t"
|
||||
"mov r0, r4 \n\t"
|
||||
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
|
||||
"ldmia sp!, {r4-r11} \n\t"
|
||||
"ldmia sp!, {r2-r11} \n\t"
|
||||
"mov sp, r2 \n\t"
|
||||
"bx r0 \n\t"
|
||||
);
|
||||
}
|
||||
|
|
|
@ -3,8 +3,6 @@
|
|||
|
||||
#include <linux/compiler.h>
|
||||
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
struct task_struct;
|
||||
|
|
|
@ -944,7 +944,7 @@ static bool have_cpu_die(void)
|
|||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
int any_cpu = raw_smp_processor_id();
|
||||
|
||||
if (cpu_ops[any_cpu]->cpu_die)
|
||||
if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
|
||||
return true;
|
||||
#endif
|
||||
return false;
|
||||
|
|
|
@ -1,2 +1 @@
|
|||
vdso.lds
|
||||
vdso-offsets.h
|
||||
|
|
|
@ -42,7 +42,20 @@
|
|||
#include <asm/pgtable.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
static const char *fault_name(unsigned int esr);
|
||||
struct fault_info {
|
||||
int (*fn)(unsigned long addr, unsigned int esr,
|
||||
struct pt_regs *regs);
|
||||
int sig;
|
||||
int code;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
static const struct fault_info fault_info[];
|
||||
|
||||
static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
|
||||
{
|
||||
return fault_info + (esr & 63);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KPROBES
|
||||
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
|
||||
|
@ -197,10 +210,12 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
|
|||
struct pt_regs *regs)
|
||||
{
|
||||
struct siginfo si;
|
||||
const struct fault_info *inf;
|
||||
|
||||
if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
|
||||
inf = esr_to_fault_info(esr);
|
||||
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
|
||||
tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
|
||||
tsk->comm, task_pid_nr(tsk), inf->name, sig,
|
||||
addr, esr);
|
||||
show_pte(tsk->mm, addr);
|
||||
show_regs(regs);
|
||||
|
@ -219,14 +234,16 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
|
|||
{
|
||||
struct task_struct *tsk = current;
|
||||
struct mm_struct *mm = tsk->active_mm;
|
||||
const struct fault_info *inf;
|
||||
|
||||
/*
|
||||
* If we are in kernel mode at this point, we have no context to
|
||||
* handle this fault with.
|
||||
*/
|
||||
if (user_mode(regs))
|
||||
__do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
|
||||
else
|
||||
if (user_mode(regs)) {
|
||||
inf = esr_to_fault_info(esr);
|
||||
__do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
|
||||
} else
|
||||
__do_kernel_fault(mm, addr, esr, regs);
|
||||
}
|
||||
|
||||
|
@ -488,12 +505,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
|
|||
return 1;
|
||||
}
|
||||
|
||||
static const struct fault_info {
|
||||
int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
|
||||
int sig;
|
||||
int code;
|
||||
const char *name;
|
||||
} fault_info[] = {
|
||||
static const struct fault_info fault_info[] = {
|
||||
{ do_bad, SIGBUS, 0, "ttbr address size fault" },
|
||||
{ do_bad, SIGBUS, 0, "level 1 address size fault" },
|
||||
{ do_bad, SIGBUS, 0, "level 2 address size fault" },
|
||||
|
@ -560,19 +572,13 @@ static const struct fault_info {
|
|||
{ do_bad, SIGBUS, 0, "unknown 63" },
|
||||
};
|
||||
|
||||
static const char *fault_name(unsigned int esr)
|
||||
{
|
||||
const struct fault_info *inf = fault_info + (esr & 63);
|
||||
return inf->name;
|
||||
}
|
||||
|
||||
/*
|
||||
* Dispatch a data abort to the relevant handler.
|
||||
*/
|
||||
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
const struct fault_info *inf = fault_info + (esr & 63);
|
||||
const struct fault_info *inf = esr_to_fault_info(esr);
|
||||
struct siginfo info;
|
||||
|
||||
if (!inf->fn(addr, esr, regs))
|
||||
|
|
|
@ -294,10 +294,6 @@ static __init int setup_hugepagesz(char *opt)
|
|||
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
|
||||
} else if (ps == PUD_SIZE) {
|
||||
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
|
||||
} else if (ps == (PAGE_SIZE * CONT_PTES)) {
|
||||
hugetlb_add_hstate(CONT_PTE_SHIFT);
|
||||
} else if (ps == (PMD_SIZE * CONT_PMDS)) {
|
||||
hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
|
||||
} else {
|
||||
hugetlb_bad_size();
|
||||
pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
|
||||
|
@ -306,13 +302,3 @@ static __init int setup_hugepagesz(char *opt)
|
|||
return 1;
|
||||
}
|
||||
__setup("hugepagesz=", setup_hugepagesz);
|
||||
|
||||
#ifdef CONFIG_ARM64_64K_PAGES
|
||||
static __init int add_default_hugepagesz(void)
|
||||
{
|
||||
if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
|
||||
hugetlb_add_hstate(CONT_PTE_SHIFT);
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(add_default_hugepagesz);
|
||||
#endif
|
||||
|
|
|
@ -70,46 +70,6 @@ static int gpr_get(struct task_struct *target,
|
|||
0, sizeof(*regs));
|
||||
}
|
||||
|
||||
static int gpr_set(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
int ret;
|
||||
struct pt_regs *regs = task_pt_regs(target);
|
||||
|
||||
/* Don't copyin TSR or CSR */
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
®s,
|
||||
0, PT_TSR * sizeof(long));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
|
||||
PT_TSR * sizeof(long),
|
||||
(PT_TSR + 1) * sizeof(long));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
®s,
|
||||
(PT_TSR + 1) * sizeof(long),
|
||||
PT_CSR * sizeof(long));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
|
||||
PT_CSR * sizeof(long),
|
||||
(PT_CSR + 1) * sizeof(long));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
®s,
|
||||
(PT_CSR + 1) * sizeof(long), -1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
enum c6x_regset {
|
||||
REGSET_GPR,
|
||||
};
|
||||
|
@ -121,7 +81,6 @@ static const struct user_regset c6x_regsets[] = {
|
|||
.size = sizeof(u32),
|
||||
.align = sizeof(u32),
|
||||
.get = gpr_get,
|
||||
.set = gpr_set
|
||||
},
|
||||
};
|
||||
|
||||
|
|
|
@ -95,7 +95,8 @@ static int regs_get(struct task_struct *target,
|
|||
long *reg = (long *)®s;
|
||||
|
||||
/* build user regs in buffer */
|
||||
for (r = 0; r < ARRAY_SIZE(register_offset); r++)
|
||||
BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
|
||||
for (r = 0; r < sizeof(regs) / sizeof(long); r++)
|
||||
*reg++ = h8300_get_reg(target, r);
|
||||
|
||||
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
|
@ -113,7 +114,8 @@ static int regs_set(struct task_struct *target,
|
|||
long *reg;
|
||||
|
||||
/* build user regs in buffer */
|
||||
for (reg = (long *)®s, r = 0; r < ARRAY_SIZE(register_offset); r++)
|
||||
BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
|
||||
for (reg = (long *)®s, r = 0; r < sizeof(regs) / sizeof(long); r++)
|
||||
*reg++ = h8300_get_reg(target, r);
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
|
@ -122,7 +124,7 @@ static int regs_set(struct task_struct *target,
|
|||
return ret;
|
||||
|
||||
/* write back to pt_regs */
|
||||
for (reg = (long *)®s, r = 0; r < ARRAY_SIZE(register_offset); r++)
|
||||
for (reg = (long *)®s, r = 0; r < sizeof(regs) / sizeof(long); r++)
|
||||
h8300_put_reg(target, r, *reg++);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
|
|||
# CONFIG_EFI_PARTITION is not set
|
||||
CONFIG_SYSV68_PARTITION=y
|
||||
CONFIG_IOSCHED_DEADLINE=m
|
||||
CONFIG_MQ_IOSCHED_DEADLINE=m
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_BOOTINFO_PROC=y
|
||||
CONFIG_M68020=y
|
||||
|
@ -60,6 +61,7 @@ CONFIG_NET_IPVTI=m
|
|||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_ESP_OFFLOAD=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
||||
|
@ -71,6 +73,7 @@ CONFIG_IPV6=m
|
|||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_ESP_OFFLOAD=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
|
@ -101,6 +104,7 @@ CONFIG_NFT_NUMGEN=m
|
|||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_SET_RBTREE=m
|
||||
CONFIG_NFT_SET_HASH=m
|
||||
CONFIG_NFT_SET_BITMAP=m
|
||||
CONFIG_NFT_COUNTER=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
|
@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
|
|||
CONFIG_NET_L3_MASTER_DEV=y
|
||||
CONFIG_AF_KCM=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
CONFIG_PSAMPLE=m
|
||||
CONFIG_NET_IFE=m
|
||||
CONFIG_NET_DEVLINK=m
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
|
@ -371,6 +377,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
|
|||
CONFIG_MACVLAN=m
|
||||
CONFIG_MACVTAP=m
|
||||
CONFIG_IPVLAN=m
|
||||
CONFIG_IPVTAP=m
|
||||
CONFIG_VXLAN=m
|
||||
CONFIG_GENEVE=m
|
||||
CONFIG_GTP=m
|
||||
|
@ -383,6 +390,7 @@ CONFIG_VETH=m
|
|||
# CONFIG_NET_VENDOR_AMAZON is not set
|
||||
CONFIG_A2065=y
|
||||
CONFIG_ARIADNE=y
|
||||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_CADENCE is not set
|
||||
# CONFIG_NET_VENDOR_BROADCOM is not set
|
||||
|
@ -404,7 +412,6 @@ CONFIG_ZORRO8390=y
|
|||
# CONFIG_NET_VENDOR_SOLARFLARE is not set
|
||||
# CONFIG_NET_VENDOR_SMSC is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
|
@ -564,6 +571,8 @@ CONFIG_NLS_MAC_TURKISH=m
|
|||
CONFIG_DLM=m
|
||||
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_WW_MUTEX_SELFTEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=m
|
||||
CONFIG_ASYNC_RAID6_TEST=m
|
||||
CONFIG_TEST_HEXDUMP=m
|
||||
CONFIG_TEST_STRING_HELPERS=m
|
||||
|
@ -594,6 +603,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
|
|||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_CMAC=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
|
@ -605,6 +615,7 @@ CONFIG_CRYPTO_SHA512=m
|
|||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
|
@ -629,4 +640,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
|||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
CONFIG_CRC32_SELFTEST=m
|
||||
CONFIG_XZ_DEC_TEST=m
|
||||
|
|
|
@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
|
|||
# CONFIG_EFI_PARTITION is not set
|
||||
CONFIG_SYSV68_PARTITION=y
|
||||
CONFIG_IOSCHED_DEADLINE=m
|
||||
CONFIG_MQ_IOSCHED_DEADLINE=m
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_BOOTINFO_PROC=y
|
||||
CONFIG_M68020=y
|
||||
|
@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
|
|||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_ESP_OFFLOAD=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
||||
|
@ -69,6 +71,7 @@ CONFIG_IPV6=m
|
|||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_ESP_OFFLOAD=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
|
@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
|
|||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_SET_RBTREE=m
|
||||
CONFIG_NFT_SET_HASH=m
|
||||
CONFIG_NFT_SET_BITMAP=m
|
||||
CONFIG_NFT_COUNTER=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
|
@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
|
|||
CONFIG_NET_L3_MASTER_DEV=y
|
||||
CONFIG_AF_KCM=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
CONFIG_PSAMPLE=m
|
||||
CONFIG_NET_IFE=m
|
||||
CONFIG_NET_DEVLINK=m
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
|
@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
|
|||
CONFIG_MACVLAN=m
|
||||
CONFIG_MACVTAP=m
|
||||
CONFIG_IPVLAN=m
|
||||
CONFIG_IPVTAP=m
|
||||
CONFIG_VXLAN=m
|
||||
CONFIG_GENEVE=m
|
||||
CONFIG_GTP=m
|
||||
|
@ -362,6 +369,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
|
|||
CONFIG_VETH=m
|
||||
# CONFIG_NET_VENDOR_ALACRITECH is not set
|
||||
# CONFIG_NET_VENDOR_AMAZON is not set
|
||||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_CADENCE is not set
|
||||
# CONFIG_NET_VENDOR_BROADCOM is not set
|
||||
|
@ -378,7 +386,6 @@ CONFIG_VETH=m
|
|||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_SOLARFLARE is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
|
@ -523,6 +530,8 @@ CONFIG_NLS_MAC_TURKISH=m
|
|||
CONFIG_DLM=m
|
||||
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_WW_MUTEX_SELFTEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=m
|
||||
CONFIG_ASYNC_RAID6_TEST=m
|
||||
CONFIG_TEST_HEXDUMP=m
|
||||
CONFIG_TEST_STRING_HELPERS=m
|
||||
|
@ -553,6 +562,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
|
|||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_CMAC=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
|
@ -564,6 +574,7 @@ CONFIG_CRYPTO_SHA512=m
|
|||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
|
@ -588,4 +599,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
|||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
CONFIG_CRC32_SELFTEST=m
|
||||
CONFIG_XZ_DEC_TEST=m
|
||||
|
|
|
@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
|
|||
# CONFIG_EFI_PARTITION is not set
|
||||
CONFIG_SYSV68_PARTITION=y
|
||||
CONFIG_IOSCHED_DEADLINE=m
|
||||
CONFIG_MQ_IOSCHED_DEADLINE=m
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_BOOTINFO_PROC=y
|
||||
CONFIG_M68020=y
|
||||
|
@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
|
|||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_ESP_OFFLOAD=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
||||
|
@ -69,6 +71,7 @@ CONFIG_IPV6=m
|
|||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_ESP_OFFLOAD=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
|
@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
|
|||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_SET_RBTREE=m
|
||||
CONFIG_NFT_SET_HASH=m
|
||||
CONFIG_NFT_SET_BITMAP=m
|
||||
CONFIG_NFT_COUNTER=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
|
@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
|
|||
CONFIG_NET_L3_MASTER_DEV=y
|
||||
CONFIG_AF_KCM=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
CONFIG_PSAMPLE=m
|
||||
CONFIG_NET_IFE=m
|
||||
CONFIG_NET_DEVLINK=m
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
|
@ -362,6 +368,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
|
|||
CONFIG_MACVLAN=m
|
||||
CONFIG_MACVTAP=m
|
||||
CONFIG_IPVLAN=m
|
||||
CONFIG_IPVTAP=m
|
||||
CONFIG_VXLAN=m
|
||||
CONFIG_GENEVE=m
|
||||
CONFIG_GTP=m
|
||||
|
@ -372,6 +379,7 @@ CONFIG_VETH=m
|
|||
# CONFIG_NET_VENDOR_ALACRITECH is not set
|
||||
# CONFIG_NET_VENDOR_AMAZON is not set
|
||||
CONFIG_ATARILANCE=y
|
||||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_CADENCE is not set
|
||||
# CONFIG_NET_VENDOR_BROADCOM is not set
|
||||
|
@ -389,7 +397,6 @@ CONFIG_NE2000=y
|
|||
# CONFIG_NET_VENDOR_SOLARFLARE is not set
|
||||
CONFIG_SMC91X=y
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
|
@ -544,6 +551,8 @@ CONFIG_NLS_MAC_TURKISH=m
|
|||
CONFIG_DLM=m
|
||||
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_WW_MUTEX_SELFTEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=m
|
||||
CONFIG_ASYNC_RAID6_TEST=m
|
||||
CONFIG_TEST_HEXDUMP=m
|
||||
CONFIG_TEST_STRING_HELPERS=m
|
||||
|
@ -574,6 +583,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
|
|||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_CMAC=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
|
@ -585,6 +595,7 @@ CONFIG_CRYPTO_SHA512=m
|
|||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
|
@ -609,4 +620,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
|||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
CONFIG_CRC32_SELFTEST=m
|
||||
CONFIG_XZ_DEC_TEST=m
|
||||
|
|
|
@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
|
|||
CONFIG_SUN_PARTITION=y
|
||||
# CONFIG_EFI_PARTITION is not set
|
||||
CONFIG_IOSCHED_DEADLINE=m
|
||||
CONFIG_MQ_IOSCHED_DEADLINE=m
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_BOOTINFO_PROC=y
|
||||
CONFIG_M68040=y
|
||||
|
@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
|
|||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_ESP_OFFLOAD=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
||||
|
@ -67,6 +69,7 @@ CONFIG_IPV6=m
|
|||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_ESP_OFFLOAD=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
|
@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
|
|||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_SET_RBTREE=m
|
||||
CONFIG_NFT_SET_HASH=m
|
||||
CONFIG_NFT_SET_BITMAP=m
|
||||
CONFIG_NFT_COUNTER=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
|
@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
|
|||
CONFIG_NET_L3_MASTER_DEV=y
|
||||
CONFIG_AF_KCM=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
CONFIG_PSAMPLE=m
|
||||
CONFIG_NET_IFE=m
|
||||
CONFIG_NET_DEVLINK=m
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
|
@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
|
|||
CONFIG_MACVLAN=m
|
||||
CONFIG_MACVTAP=m
|
||||
CONFIG_IPVLAN=m
|
||||
CONFIG_IPVTAP=m
|
||||
CONFIG_VXLAN=m
|
||||
CONFIG_GENEVE=m
|
||||
CONFIG_GTP=m
|
||||
|
@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
|
|||
CONFIG_VETH=m
|
||||
# CONFIG_NET_VENDOR_ALACRITECH is not set
|
||||
# CONFIG_NET_VENDOR_AMAZON is not set
|
||||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_CADENCE is not set
|
||||
# CONFIG_NET_VENDOR_BROADCOM is not set
|
||||
|
@ -377,7 +385,6 @@ CONFIG_BVME6000_NET=y
|
|||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_SOLARFLARE is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
|
@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
|
|||
CONFIG_DLM=m
|
||||
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_WW_MUTEX_SELFTEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=m
|
||||
CONFIG_ASYNC_RAID6_TEST=m
|
||||
CONFIG_TEST_HEXDUMP=m
|
||||
CONFIG_TEST_STRING_HELPERS=m
|
||||
|
@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
|
|||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_CMAC=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
|
@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
|
|||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
|
@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
|||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
CONFIG_CRC32_SELFTEST=m
|
||||
CONFIG_XZ_DEC_TEST=m
|
||||
|
|
|
@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
|
|||
# CONFIG_EFI_PARTITION is not set
|
||||
CONFIG_SYSV68_PARTITION=y
|
||||
CONFIG_IOSCHED_DEADLINE=m
|
||||
CONFIG_MQ_IOSCHED_DEADLINE=m
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_BOOTINFO_PROC=y
|
||||
CONFIG_M68020=y
|
||||
|
@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
|
|||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_ESP_OFFLOAD=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
||||
|
@ -69,6 +71,7 @@ CONFIG_IPV6=m
|
|||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_ESP_OFFLOAD=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
|
@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
|
|||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_SET_RBTREE=m
|
||||
CONFIG_NFT_SET_HASH=m
|
||||
CONFIG_NFT_SET_BITMAP=m
|
||||
CONFIG_NFT_COUNTER=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
|
@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
|
|||
CONFIG_NET_L3_MASTER_DEV=y
|
||||
CONFIG_AF_KCM=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
CONFIG_PSAMPLE=m
|
||||
CONFIG_NET_IFE=m
|
||||
CONFIG_NET_DEVLINK=m
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
|
@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
|
|||
CONFIG_MACVLAN=m
|
||||
CONFIG_MACVTAP=m
|
||||
CONFIG_IPVLAN=m
|
||||
CONFIG_IPVTAP=m
|
||||
CONFIG_VXLAN=m
|
||||
CONFIG_GENEVE=m
|
||||
CONFIG_GTP=m
|
||||
|
@ -363,6 +370,7 @@ CONFIG_VETH=m
|
|||
# CONFIG_NET_VENDOR_ALACRITECH is not set
|
||||
# CONFIG_NET_VENDOR_AMAZON is not set
|
||||
CONFIG_HPLANCE=y
|
||||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_CADENCE is not set
|
||||
# CONFIG_NET_VENDOR_BROADCOM is not set
|
||||
|
@ -379,7 +387,6 @@ CONFIG_HPLANCE=y
|
|||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_SOLARFLARE is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
|
@ -525,6 +532,8 @@ CONFIG_NLS_MAC_TURKISH=m
|
|||
CONFIG_DLM=m
|
||||
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_WW_MUTEX_SELFTEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=m
|
||||
CONFIG_ASYNC_RAID6_TEST=m
|
||||
CONFIG_TEST_HEXDUMP=m
|
||||
CONFIG_TEST_STRING_HELPERS=m
|
||||
|
@ -555,6 +564,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
|
|||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_CMAC=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
|
@ -566,6 +576,7 @@ CONFIG_CRYPTO_SHA512=m
|
|||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
|
@ -590,4 +601,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
|||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
CONFIG_CRC32_SELFTEST=m
|
||||
CONFIG_XZ_DEC_TEST=m
|
||||
|
|
|
@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
|
|||
# CONFIG_EFI_PARTITION is not set
|
||||
CONFIG_SYSV68_PARTITION=y
|
||||
CONFIG_IOSCHED_DEADLINE=m
|
||||
CONFIG_MQ_IOSCHED_DEADLINE=m
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_BOOTINFO_PROC=y
|
||||
CONFIG_M68020=y
|
||||
|
@ -57,6 +58,7 @@ CONFIG_NET_IPVTI=m
|
|||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_ESP_OFFLOAD=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
||||
|
@ -68,6 +70,7 @@ CONFIG_IPV6=m
|
|||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_ESP_OFFLOAD=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
|
@ -98,6 +101,7 @@ CONFIG_NFT_NUMGEN=m
|
|||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_SET_RBTREE=m
|
||||
CONFIG_NFT_SET_HASH=m
|
||||
CONFIG_NFT_SET_BITMAP=m
|
||||
CONFIG_NFT_COUNTER=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
|
@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
|
|||
CONFIG_NET_L3_MASTER_DEV=y
|
||||
CONFIG_AF_KCM=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
CONFIG_PSAMPLE=m
|
||||
CONFIG_NET_IFE=m
|
||||
CONFIG_NET_DEVLINK=m
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
|
@ -369,6 +375,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
|
|||
CONFIG_MACVLAN=m
|
||||
CONFIG_MACVTAP=m
|
||||
CONFIG_IPVLAN=m
|
||||
CONFIG_IPVTAP=m
|
||||
CONFIG_VXLAN=m
|
||||
CONFIG_GENEVE=m
|
||||
CONFIG_GTP=m
|
||||
|
@ -379,6 +386,7 @@ CONFIG_VETH=m
|
|||
# CONFIG_NET_VENDOR_ALACRITECH is not set
|
||||
# CONFIG_NET_VENDOR_AMAZON is not set
|
||||
CONFIG_MACMACE=y
|
||||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_CADENCE is not set
|
||||
# CONFIG_NET_VENDOR_BROADCOM is not set
|
||||
|
@ -398,7 +406,6 @@ CONFIG_MAC8390=y
|
|||
# CONFIG_NET_VENDOR_SOLARFLARE is not set
|
||||
# CONFIG_NET_VENDOR_SMSC is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
|
@ -547,6 +554,8 @@ CONFIG_NLS_MAC_TURKISH=m
|
|||
CONFIG_DLM=m
|
||||
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_WW_MUTEX_SELFTEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=m
|
||||
CONFIG_ASYNC_RAID6_TEST=m
|
||||
CONFIG_TEST_HEXDUMP=m
|
||||
CONFIG_TEST_STRING_HELPERS=m
|
||||
|
@ -577,6 +586,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
|
|||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_CMAC=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
|
@ -588,6 +598,7 @@ CONFIG_CRYPTO_SHA512=m
|
|||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
|
@ -612,4 +623,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
|||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
CONFIG_CRC32_SELFTEST=m
|
||||
CONFIG_XZ_DEC_TEST=m
|
||||
|
|
|
@ -21,6 +21,7 @@ CONFIG_SOLARIS_X86_PARTITION=y
|
|||
CONFIG_UNIXWARE_DISKLABEL=y
|
||||
# CONFIG_EFI_PARTITION is not set
|
||||
CONFIG_IOSCHED_DEADLINE=m
|
||||
CONFIG_MQ_IOSCHED_DEADLINE=m
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_BOOTINFO_PROC=y
|
||||
CONFIG_M68020=y
|
||||
|
@ -67,6 +68,7 @@ CONFIG_NET_IPVTI=m
|
|||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_ESP_OFFLOAD=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
||||
|
@ -78,6 +80,7 @@ CONFIG_IPV6=m
|
|||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_ESP_OFFLOAD=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
|
@ -108,6 +111,7 @@ CONFIG_NFT_NUMGEN=m
|
|||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_SET_RBTREE=m
|
||||
CONFIG_NFT_SET_HASH=m
|
||||
CONFIG_NFT_SET_BITMAP=m
|
||||
CONFIG_NFT_COUNTER=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
|
@ -308,6 +312,8 @@ CONFIG_MPLS_IPTUNNEL=m
|
|||
CONFIG_NET_L3_MASTER_DEV=y
|
||||
CONFIG_AF_KCM=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
CONFIG_PSAMPLE=m
|
||||
CONFIG_NET_IFE=m
|
||||
CONFIG_NET_DEVLINK=m
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
|
@ -402,6 +408,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
|
|||
CONFIG_MACVLAN=m
|
||||
CONFIG_MACVTAP=m
|
||||
CONFIG_IPVLAN=m
|
||||
CONFIG_IPVTAP=m
|
||||
CONFIG_VXLAN=m
|
||||
CONFIG_GENEVE=m
|
||||
CONFIG_GTP=m
|
||||
|
@ -419,6 +426,7 @@ CONFIG_HPLANCE=y
|
|||
CONFIG_MVME147_NET=y
|
||||
CONFIG_SUN3LANCE=y
|
||||
CONFIG_MACMACE=y
|
||||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_CADENCE is not set
|
||||
# CONFIG_NET_VENDOR_BROADCOM is not set
|
||||
|
@ -444,7 +452,6 @@ CONFIG_ZORRO8390=y
|
|||
# CONFIG_NET_VENDOR_SOLARFLARE is not set
|
||||
CONFIG_SMC91X=y
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PLIP=m
|
||||
|
@ -627,6 +634,8 @@ CONFIG_NLS_MAC_TURKISH=m
|
|||
CONFIG_DLM=m
|
||||
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_WW_MUTEX_SELFTEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=m
|
||||
CONFIG_ASYNC_RAID6_TEST=m
|
||||
CONFIG_TEST_HEXDUMP=m
|
||||
CONFIG_TEST_STRING_HELPERS=m
|
||||
|
@ -657,6 +666,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
|
|||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_CMAC=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
|
@ -668,6 +678,7 @@ CONFIG_CRYPTO_SHA512=m
|
|||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
|
@ -692,4 +703,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
|||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
CONFIG_CRC32_SELFTEST=m
|
||||
CONFIG_XZ_DEC_TEST=m
|
||||
|
|
|
@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
|
|||
CONFIG_SUN_PARTITION=y
|
||||
# CONFIG_EFI_PARTITION is not set
|
||||
CONFIG_IOSCHED_DEADLINE=m
|
||||
CONFIG_MQ_IOSCHED_DEADLINE=m
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_BOOTINFO_PROC=y
|
||||
CONFIG_M68030=y
|
||||
|
@ -55,6 +56,7 @@ CONFIG_NET_IPVTI=m
|
|||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_ESP_OFFLOAD=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
||||
|
@ -66,6 +68,7 @@ CONFIG_IPV6=m
|
|||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_ESP_OFFLOAD=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
|
@ -96,6 +99,7 @@ CONFIG_NFT_NUMGEN=m
|
|||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_SET_RBTREE=m
|
||||
CONFIG_NFT_SET_HASH=m
|
||||
CONFIG_NFT_SET_BITMAP=m
|
||||
CONFIG_NFT_COUNTER=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
|
@ -293,6 +297,8 @@ CONFIG_MPLS_IPTUNNEL=m
|
|||
CONFIG_NET_L3_MASTER_DEV=y
|
||||
CONFIG_AF_KCM=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
CONFIG_PSAMPLE=m
|
||||
CONFIG_NET_IFE=m
|
||||
CONFIG_NET_DEVLINK=m
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
|
@ -351,6 +357,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
|
|||
CONFIG_MACVLAN=m
|
||||
CONFIG_MACVTAP=m
|
||||
CONFIG_IPVLAN=m
|
||||
CONFIG_IPVTAP=m
|
||||
CONFIG_VXLAN=m
|
||||
CONFIG_GENEVE=m
|
||||
CONFIG_GTP=m
|
||||
|
@ -361,6 +368,7 @@ CONFIG_VETH=m
|
|||
# CONFIG_NET_VENDOR_ALACRITECH is not set
|
||||
# CONFIG_NET_VENDOR_AMAZON is not set
|
||||
CONFIG_MVME147_NET=y
|
||||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_CADENCE is not set
|
||||
# CONFIG_NET_VENDOR_BROADCOM is not set
|
||||
|
@ -377,7 +385,6 @@ CONFIG_MVME147_NET=y
|
|||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_SOLARFLARE is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
|
@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
|
|||
CONFIG_DLM=m
|
||||
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_WW_MUTEX_SELFTEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=m
|
||||
CONFIG_ASYNC_RAID6_TEST=m
|
||||
CONFIG_TEST_HEXDUMP=m
|
||||
CONFIG_TEST_STRING_HELPERS=m
|
||||
|
@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
|
|||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_CMAC=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
|
@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
|
|||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
|
@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
|||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
CONFIG_CRC32_SELFTEST=m
|
||||
CONFIG_XZ_DEC_TEST=m
|
||||
|
|
|
@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
|
|||
CONFIG_SUN_PARTITION=y
|
||||
# CONFIG_EFI_PARTITION is not set
|
||||
CONFIG_IOSCHED_DEADLINE=m
|
||||
CONFIG_MQ_IOSCHED_DEADLINE=m
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_BOOTINFO_PROC=y
|
||||
CONFIG_M68040=y
|
||||
|
@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
|
|||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_ESP_OFFLOAD=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
||||
|
@ -67,6 +69,7 @@ CONFIG_IPV6=m
|
|||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_ESP_OFFLOAD=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
|
@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
|
|||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_SET_RBTREE=m
|
||||
CONFIG_NFT_SET_HASH=m
|
||||
CONFIG_NFT_SET_BITMAP=m
|
||||
CONFIG_NFT_COUNTER=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
|
@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
|
|||
CONFIG_NET_L3_MASTER_DEV=y
|
||||
CONFIG_AF_KCM=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
CONFIG_PSAMPLE=m
|
||||
CONFIG_NET_IFE=m
|
||||
CONFIG_NET_DEVLINK=m
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
|
@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
|
|||
CONFIG_MACVLAN=m
|
||||
CONFIG_MACVTAP=m
|
||||
CONFIG_IPVLAN=m
|
||||
CONFIG_IPVTAP=m
|
||||
CONFIG_VXLAN=m
|
||||
CONFIG_GENEVE=m
|
||||
CONFIG_GTP=m
|
||||
|
@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
|
|||
CONFIG_VETH=m
|
||||
# CONFIG_NET_VENDOR_ALACRITECH is not set
|
||||
# CONFIG_NET_VENDOR_AMAZON is not set
|
||||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_CADENCE is not set
|
||||
# CONFIG_NET_VENDOR_BROADCOM is not set
|
||||
|
@ -377,7 +385,6 @@ CONFIG_MVME16x_NET=y
|
|||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_SOLARFLARE is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
|
@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
|
|||
CONFIG_DLM=m
|
||||
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_WW_MUTEX_SELFTEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=m
|
||||
CONFIG_ASYNC_RAID6_TEST=m
|
||||
CONFIG_TEST_HEXDUMP=m
|
||||
CONFIG_TEST_STRING_HELPERS=m
|
||||
|
@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
|
|||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_CMAC=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
|
@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
|
|||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
|
@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
|||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
CONFIG_CRC32_SELFTEST=m
|
||||
CONFIG_XZ_DEC_TEST=m
|
||||
|
|
|
@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
|
|||
# CONFIG_EFI_PARTITION is not set
|
||||
CONFIG_SYSV68_PARTITION=y
|
||||
CONFIG_IOSCHED_DEADLINE=m
|
||||
CONFIG_MQ_IOSCHED_DEADLINE=m
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_BOOTINFO_PROC=y
|
||||
CONFIG_M68040=y
|
||||
|
@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
|
|||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_ESP_OFFLOAD=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
||||
|
@ -67,6 +69,7 @@ CONFIG_IPV6=m
|
|||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_ESP_OFFLOAD=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
|
@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
|
|||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_SET_RBTREE=m
|
||||
CONFIG_NFT_SET_HASH=m
|
||||
CONFIG_NFT_SET_BITMAP=m
|
||||
CONFIG_NFT_COUNTER=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
|
@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
|
|||
CONFIG_NET_L3_MASTER_DEV=y
|
||||
CONFIG_AF_KCM=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
CONFIG_PSAMPLE=m
|
||||
CONFIG_NET_IFE=m
|
||||
CONFIG_NET_DEVLINK=m
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
|
@ -358,6 +364,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
|
|||
CONFIG_MACVLAN=m
|
||||
CONFIG_MACVTAP=m
|
||||
CONFIG_IPVLAN=m
|
||||
CONFIG_IPVTAP=m
|
||||
CONFIG_VXLAN=m
|
||||
CONFIG_GENEVE=m
|
||||
CONFIG_GTP=m
|
||||
|
@ -369,6 +376,7 @@ CONFIG_VETH=m
|
|||
# CONFIG_NET_VENDOR_ALACRITECH is not set
|
||||
# CONFIG_NET_VENDOR_AMAZON is not set
|
||||
# CONFIG_NET_VENDOR_AMD is not set
|
||||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_CADENCE is not set
|
||||
# CONFIG_NET_VENDOR_BROADCOM is not set
|
||||
|
@ -388,7 +396,6 @@ CONFIG_NE2000=y
|
|||
# CONFIG_NET_VENDOR_SOLARFLARE is not set
|
||||
# CONFIG_NET_VENDOR_SMSC is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PLIP=m
|
||||
|
@ -538,6 +545,8 @@ CONFIG_NLS_MAC_TURKISH=m
|
|||
CONFIG_DLM=m
|
||||
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_WW_MUTEX_SELFTEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=m
|
||||
CONFIG_ASYNC_RAID6_TEST=m
|
||||
CONFIG_TEST_HEXDUMP=m
|
||||
CONFIG_TEST_STRING_HELPERS=m
|
||||
|
@ -568,6 +577,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
|
|||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_CMAC=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
|
@ -579,6 +589,7 @@ CONFIG_CRYPTO_SHA512=m
|
|||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
|
@ -603,4 +614,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
|||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
CONFIG_CRC32_SELFTEST=m
|
||||
CONFIG_XZ_DEC_TEST=m
|
||||
|
|
|
@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
|
|||
# CONFIG_EFI_PARTITION is not set
|
||||
CONFIG_SYSV68_PARTITION=y
|
||||
CONFIG_IOSCHED_DEADLINE=m
|
||||
CONFIG_MQ_IOSCHED_DEADLINE=m
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_BOOTINFO_PROC=y
|
||||
CONFIG_SUN3=y
|
||||
|
@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
|
|||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_ESP_OFFLOAD=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
||||
|
@ -64,6 +66,7 @@ CONFIG_IPV6=m
|
|||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_ESP_OFFLOAD=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
|
@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
|
|||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_SET_RBTREE=m
|
||||
CONFIG_NFT_SET_HASH=m
|
||||
CONFIG_NFT_SET_BITMAP=m
|
||||
CONFIG_NFT_COUNTER=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
|
@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
|
|||
CONFIG_NET_L3_MASTER_DEV=y
|
||||
CONFIG_AF_KCM=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
CONFIG_PSAMPLE=m
|
||||
CONFIG_NET_IFE=m
|
||||
CONFIG_NET_DEVLINK=m
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
|
@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
|
|||
CONFIG_MACVLAN=m
|
||||
CONFIG_MACVTAP=m
|
||||
CONFIG_IPVLAN=m
|
||||
CONFIG_IPVTAP=m
|
||||
CONFIG_VXLAN=m
|
||||
CONFIG_GENEVE=m
|
||||
CONFIG_GTP=m
|
||||
|
@ -359,6 +366,7 @@ CONFIG_VETH=m
|
|||
# CONFIG_NET_VENDOR_ALACRITECH is not set
|
||||
# CONFIG_NET_VENDOR_AMAZON is not set
|
||||
CONFIG_SUN3LANCE=y
|
||||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_CADENCE is not set
|
||||
# CONFIG_NET_VENDOR_EZCHIP is not set
|
||||
|
@ -375,7 +383,6 @@ CONFIG_SUN3_82586=y
|
|||
# CONFIG_NET_VENDOR_SOLARFLARE is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SUN is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
|
@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
|
|||
CONFIG_DLM=m
|
||||
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_WW_MUTEX_SELFTEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=m
|
||||
CONFIG_ASYNC_RAID6_TEST=m
|
||||
CONFIG_TEST_HEXDUMP=m
|
||||
CONFIG_TEST_STRING_HELPERS=m
|
||||
|
@ -546,6 +555,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
|
|||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_CMAC=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
|
@ -557,6 +567,7 @@ CONFIG_CRYPTO_SHA512=m
|
|||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
|
@ -581,4 +592,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
|||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
CONFIG_CRC32_SELFTEST=m
|
||||
CONFIG_XZ_DEC_TEST=m
|
||||
|
|
|
@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
|
|||
# CONFIG_EFI_PARTITION is not set
|
||||
CONFIG_SYSV68_PARTITION=y
|
||||
CONFIG_IOSCHED_DEADLINE=m
|
||||
CONFIG_MQ_IOSCHED_DEADLINE=m
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_BOOTINFO_PROC=y
|
||||
CONFIG_SUN3X=y
|
||||
|
@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
|
|||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_ESP_OFFLOAD=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
||||
|
@ -64,6 +66,7 @@ CONFIG_IPV6=m
|
|||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_ESP_OFFLOAD=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
|
@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
|
|||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_SET_RBTREE=m
|
||||
CONFIG_NFT_SET_HASH=m
|
||||
CONFIG_NFT_SET_BITMAP=m
|
||||
CONFIG_NFT_COUNTER=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
|
@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
|
|||
CONFIG_NET_L3_MASTER_DEV=y
|
||||
CONFIG_AF_KCM=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
CONFIG_PSAMPLE=m
|
||||
CONFIG_NET_IFE=m
|
||||
CONFIG_NET_DEVLINK=m
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
|
@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
|
|||
CONFIG_MACVLAN=m
|
||||
CONFIG_MACVTAP=m
|
||||
CONFIG_IPVLAN=m
|
||||
CONFIG_IPVTAP=m
|
||||
CONFIG_VXLAN=m
|
||||
CONFIG_GENEVE=m
|
||||
CONFIG_GTP=m
|
||||
|
@ -359,6 +366,7 @@ CONFIG_VETH=m
|
|||
# CONFIG_NET_VENDOR_ALACRITECH is not set
|
||||
# CONFIG_NET_VENDOR_AMAZON is not set
|
||||
CONFIG_SUN3LANCE=y
|
||||
# CONFIG_NET_VENDOR_AQUANTIA is not set
|
||||
# CONFIG_NET_VENDOR_ARC is not set
|
||||
# CONFIG_NET_CADENCE is not set
|
||||
# CONFIG_NET_VENDOR_BROADCOM is not set
|
||||
|
@ -375,7 +383,6 @@ CONFIG_SUN3LANCE=y
|
|||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_SOLARFLARE is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
|
@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
|
|||
CONFIG_DLM=m
|
||||
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_WW_MUTEX_SELFTEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=m
|
||||
CONFIG_ASYNC_RAID6_TEST=m
|
||||
CONFIG_TEST_HEXDUMP=m
|
||||
CONFIG_TEST_STRING_HELPERS=m
|
||||
|
@ -547,6 +556,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
|
|||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_CMAC=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
|
@ -558,6 +568,7 @@ CONFIG_CRYPTO_SHA512=m
|
|||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_TGR192=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
|
@ -582,4 +593,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
|||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
CONFIG_CRC32_SELFTEST=m
|
||||
CONFIG_XZ_DEC_TEST=m
|
||||
|
|
|
@@ -148,7 +148,7 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)

#define __change_bit(nr, vaddr)	change_bit(nr, vaddr)

static inline int test_bit(int nr, const unsigned long *vaddr)
static inline int test_bit(int nr, const volatile unsigned long *vaddr)
{
	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}

@@ -4,7 +4,7 @@

#include <uapi/asm/unistd.h>

#define NR_syscalls		379
#define NR_syscalls		380

#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT

@@ -384,5 +384,6 @@
#define __NR_copy_file_range	376
#define __NR_preadv2		377
#define __NR_pwritev2		378
#define __NR_statx		379

#endif /* _UAPI_ASM_M68K_UNISTD_H_ */

@@ -399,3 +399,4 @@ ENTRY(sys_call_table)
	.long sys_copy_file_range
	.long sys_preadv2
	.long sys_pwritev2
	.long sys_statx

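For context, a minimal user-space sketch (my illustration, not part of this patch) of how a newly wired syscall number such as __NR_statx is exercised directly via syscall(2). The value 379 is the m68k assignment added above; struct statx and STATX_BASIC_STATS are assumed to come from v4.11-era <linux/stat.h> headers.

#include <fcntl.h>          /* AT_FDCWD */
#include <linux/stat.h>     /* struct statx, STATX_BASIC_STATS (v4.11+) */
#include <stdio.h>
#include <unistd.h>         /* syscall() */

int main(void)
{
	struct statx stx;
	/* 379 = __NR_statx on m68k per the hunks above. */
	long ret = syscall(379, AT_FDCWD, "/", 0, STATX_BASIC_STATS, &stx);

	if (ret == 0)
		printf("mode=%o size=%llu\n", (unsigned)stx.stx_mode,
		       (unsigned long long)stx.stx_size);
	return ret ? 1 : 0;
}
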
@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);

#define strlen_user(str) strnlen_user(str, 32767)

extern unsigned long __must_check __copy_user_zeroing(void *to,
						       const void __user *from,
extern unsigned long raw_copy_from_user(void *to, const void __user *from,
					 unsigned long n);

static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	if (likely(access_ok(VERIFY_READ, from, n)))
		return __copy_user_zeroing(to, from, n);
	memset(to, 0, n);
	return n;
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}

#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
#define __copy_from_user_inatomic __copy_from_user

extern unsigned long __must_check __copy_user(void __user *to,

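A small stand-alone sketch (my illustration, not kernel code) of the contract the rewritten copy_from_user() above implements: the raw copy reports how many bytes it could not transfer, and the wrapper zero-fills exactly that tail so the destination never holds stale data. The fake_raw_copy() helper and its fault_after parameter are invented for the demonstration.

#include <stdio.h>
#include <string.h>

/* Stand-in for raw_copy_from_user(): copies up to 'fault_after' bytes and
 * returns how many bytes were left uncopied, mimicking a mid-copy fault. */
static unsigned long fake_raw_copy(void *to, const void *from,
				   unsigned long n, unsigned long fault_after)
{
	unsigned long copied = n < fault_after ? n : fault_after;

	memcpy(to, from, copied);
	return n - copied;
}

/* Mirrors the shape of the new copy_from_user(): zero-fill the uncopied tail. */
static unsigned long sketch_copy_from_user(void *to, const void *from,
					   unsigned long n,
					   unsigned long fault_after)
{
	unsigned long res = fake_raw_copy(to, from, n, fault_after);

	if (res)
		memset((char *)to + (n - res), 0, res);
	return res;
}

int main(void)
{
	char src[8] = "ABCDEFG";
	char dst[8];

	/* Simulate a fault after 3 bytes: dst gets "ABC" plus five zero bytes. */
	unsigned long left = sketch_copy_from_user(dst, src, sizeof(dst), 3);

	printf("uncopied=%lu first=%c last=%d\n", left, dst[0], dst[7]);
	return 0;
}
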
@@ -26,6 +26,16 @@
 * user_regset definitions.
 */

static unsigned long user_txstatus(const struct pt_regs *regs)
{
	unsigned long data = (unsigned long)regs->ctx.Flags;

	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
		data |= USER_GP_REGS_STATUS_CATCH_BIT;

	return data;
}

int metag_gp_regs_copyout(const struct pt_regs *regs,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
@@ -64,9 +74,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs,
	if (ret)
		goto out;
	/* TXSTATUS */
	data = (unsigned long)regs->ctx.Flags;
	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
		data |= USER_GP_REGS_STATUS_CATCH_BIT;
	data = user_txstatus(regs);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &data, 4*25, 4*26);
	if (ret)
@@ -121,6 +129,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs,
	if (ret)
		goto out;
	/* TXSTATUS */
	data = user_txstatus(regs);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &data, 4*25, 4*26);
	if (ret)
@@ -246,6 +255,8 @@ int metag_rp_state_copyin(struct pt_regs *regs,
	unsigned long long *ptr;
	int ret, i;

	if (count < 4*13)
		return -EINVAL;
	/* Read the entire pipeline before making any changes */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &rp, 0, 4*13);
@@ -305,7 +316,7 @@ static int metag_tls_set(struct task_struct *target,
			 const void *kbuf, const void __user *ubuf)
{
	int ret;
	void __user *tls;
	void __user *tls = target->thread.tls_ptr;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)

@ -29,7 +29,6 @@
|
|||
COPY \
|
||||
"1:\n" \
|
||||
" .section .fixup,\"ax\"\n" \
|
||||
" MOV D1Ar1,#0\n" \
|
||||
FIXUP \
|
||||
" MOVT D1Ar1,#HI(1b)\n" \
|
||||
" JUMP D1Ar1,#LO(1b)\n" \
|
||||
|
@ -260,27 +259,31 @@
|
|||
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"22:\n" \
|
||||
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"SUB %3, %3, #32\n" \
|
||||
"23:\n" \
|
||||
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"SUB %3, %3, #32\n" \
|
||||
"24:\n" \
|
||||
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"25:\n" \
|
||||
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"26:\n" \
|
||||
"SUB %3, %3, #32\n" \
|
||||
"DCACHE [%1+#-64], D0Ar6\n" \
|
||||
"BR $Lloop"id"\n" \
|
||||
\
|
||||
"MOV RAPF, %1\n" \
|
||||
"25:\n" \
|
||||
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"26:\n" \
|
||||
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"SUB %3, %3, #32\n" \
|
||||
"27:\n" \
|
||||
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"28:\n" \
|
||||
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"SUB %0, %0, #8\n" \
|
||||
"29:\n" \
|
||||
"SUB %3, %3, #32\n" \
|
||||
"30:\n" \
|
||||
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"31:\n" \
|
||||
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"32:\n" \
|
||||
"SUB %0, %0, #8\n" \
|
||||
"33:\n" \
|
||||
"SETL [%0++], D0.7, D1.7\n" \
|
||||
"SUB %3, %3, #32\n" \
|
||||
"1:" \
|
||||
|
@ -312,11 +315,15 @@
|
|||
" .long 26b,3b\n" \
|
||||
" .long 27b,3b\n" \
|
||||
" .long 28b,3b\n" \
|
||||
" .long 29b,4b\n" \
|
||||
" .long 29b,3b\n" \
|
||||
" .long 30b,3b\n" \
|
||||
" .long 31b,3b\n" \
|
||||
" .long 32b,3b\n" \
|
||||
" .long 33b,4b\n" \
|
||||
" .previous\n" \
|
||||
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
|
||||
: "0" (to), "1" (from), "2" (ret), "3" (n) \
|
||||
: "D1Ar1", "D0Ar2", "memory")
|
||||
: "D1Ar1", "D0Ar2", "cc", "memory")
|
||||
|
||||
/* rewind 'to' and 'from' pointers when a fault occurs
|
||||
*
|
||||
|
@ -342,7 +349,7 @@
|
|||
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
|
||||
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
|
||||
"LSR D0Ar2, D0Ar2, #8\n" \
|
||||
"AND D0Ar2, D0Ar2, #0x7\n" \
|
||||
"ANDS D0Ar2, D0Ar2, #0x7\n" \
|
||||
"ADDZ D0Ar2, D0Ar2, #4\n" \
|
||||
"SUB D0Ar2, D0Ar2, #1\n" \
|
||||
"MOV D1Ar1, #4\n" \
|
||||
|
@ -403,47 +410,55 @@
|
|||
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"22:\n" \
|
||||
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"SUB %3, %3, #16\n" \
|
||||
"23:\n" \
|
||||
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"24:\n" \
|
||||
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"SUB %3, %3, #16\n" \
|
||||
"25:\n" \
|
||||
"24:\n" \
|
||||
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"26:\n" \
|
||||
"25:\n" \
|
||||
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"26:\n" \
|
||||
"SUB %3, %3, #16\n" \
|
||||
"27:\n" \
|
||||
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"28:\n" \
|
||||
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"29:\n" \
|
||||
"SUB %3, %3, #16\n" \
|
||||
"30:\n" \
|
||||
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"31:\n" \
|
||||
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"32:\n" \
|
||||
"SUB %3, %3, #16\n" \
|
||||
"DCACHE [%1+#-64], D0Ar6\n" \
|
||||
"BR $Lloop"id"\n" \
|
||||
\
|
||||
"MOV RAPF, %1\n" \
|
||||
"29:\n" \
|
||||
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"30:\n" \
|
||||
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"SUB %3, %3, #16\n" \
|
||||
"31:\n" \
|
||||
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"32:\n" \
|
||||
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"SUB %3, %3, #16\n" \
|
||||
"33:\n" \
|
||||
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"34:\n" \
|
||||
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"SUB %3, %3, #16\n" \
|
||||
"35:\n" \
|
||||
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"SUB %3, %3, #16\n" \
|
||||
"36:\n" \
|
||||
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"SUB %0, %0, #4\n" \
|
||||
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"37:\n" \
|
||||
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"38:\n" \
|
||||
"SUB %3, %3, #16\n" \
|
||||
"39:\n" \
|
||||
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"40:\n" \
|
||||
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"41:\n" \
|
||||
"SUB %3, %3, #16\n" \
|
||||
"42:\n" \
|
||||
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
|
||||
"43:\n" \
|
||||
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
|
||||
"44:\n" \
|
||||
"SUB %0, %0, #4\n" \
|
||||
"45:\n" \
|
||||
"SETD [%0++], D0.7\n" \
|
||||
"SUB %3, %3, #16\n" \
|
||||
"1:" \
|
||||
|
@ -483,11 +498,19 @@
|
|||
" .long 34b,3b\n" \
|
||||
" .long 35b,3b\n" \
|
||||
" .long 36b,3b\n" \
|
||||
" .long 37b,4b\n" \
|
||||
" .long 37b,3b\n" \
|
||||
" .long 38b,3b\n" \
|
||||
" .long 39b,3b\n" \
|
||||
" .long 40b,3b\n" \
|
||||
" .long 41b,3b\n" \
|
||||
" .long 42b,3b\n" \
|
||||
" .long 43b,3b\n" \
|
||||
" .long 44b,3b\n" \
|
||||
" .long 45b,4b\n" \
|
||||
" .previous\n" \
|
||||
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
|
||||
: "0" (to), "1" (from), "2" (ret), "3" (n) \
|
||||
: "D1Ar1", "D0Ar2", "memory")
|
||||
: "D1Ar1", "D0Ar2", "cc", "memory")
|
||||
|
||||
/* rewind 'to' and 'from' pointers when a fault occurs
|
||||
*
|
||||
|
@ -513,7 +536,7 @@
|
|||
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
|
||||
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
|
||||
"LSR D0Ar2, D0Ar2, #8\n" \
|
||||
"AND D0Ar2, D0Ar2, #0x7\n" \
|
||||
"ANDS D0Ar2, D0Ar2, #0x7\n" \
|
||||
"ADDZ D0Ar2, D0Ar2, #4\n" \
|
||||
"SUB D0Ar2, D0Ar2, #1\n" \
|
||||
"MOV D1Ar1, #4\n" \
|
||||
|
@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
|
|||
if ((unsigned long) src & 1) {
|
||||
__asm_copy_to_user_1(dst, src, retn);
|
||||
n--;
|
||||
if (retn)
|
||||
return retn + n;
|
||||
}
|
||||
if ((unsigned long) dst & 1) {
|
||||
/* Worst case - byte copy */
|
||||
while (n > 0) {
|
||||
__asm_copy_to_user_1(dst, src, retn);
|
||||
n--;
|
||||
if (retn)
|
||||
return retn + n;
|
||||
}
|
||||
}
|
||||
if (((unsigned long) src & 2) && n >= 2) {
|
||||
__asm_copy_to_user_2(dst, src, retn);
|
||||
n -= 2;
|
||||
if (retn)
|
||||
return retn + n;
|
||||
}
|
||||
if ((unsigned long) dst & 2) {
|
||||
/* Second worst case - word copy */
|
||||
while (n >= 2) {
|
||||
__asm_copy_to_user_2(dst, src, retn);
|
||||
n -= 2;
|
||||
if (retn)
|
||||
return retn + n;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
|
|||
while (n >= 8) {
|
||||
__asm_copy_to_user_8x64(dst, src, retn);
|
||||
n -= 8;
|
||||
if (retn)
|
||||
return retn + n;
|
||||
}
|
||||
}
|
||||
if (n >= RAPF_MIN_BUF_SIZE) {
|
||||
|
@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
|
|||
while (n >= 8) {
|
||||
__asm_copy_to_user_8x64(dst, src, retn);
|
||||
n -= 8;
|
||||
if (retn)
|
||||
return retn + n;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
|
|||
while (n >= 16) {
|
||||
__asm_copy_to_user_16(dst, src, retn);
|
||||
n -= 16;
|
||||
if (retn)
|
||||
return retn + n;
|
||||
}
|
||||
|
||||
while (n >= 4) {
|
||||
__asm_copy_to_user_4(dst, src, retn);
|
||||
n -= 4;
|
||||
if (retn)
|
||||
return retn + n;
|
||||
}
|
||||
|
||||
switch (n) {
|
||||
|
@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
|
|||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we get here, retn correctly reflects the number of failing
|
||||
* bytes.
|
||||
*/
|
||||
return retn;
|
||||
}
|
||||
EXPORT_SYMBOL(__copy_user);
|
||||
|
@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
|
|||
__asm_copy_user_cont(to, from, ret, \
|
||||
" GETB D1Ar1,[%1++]\n" \
|
||||
"2: SETB [%0++],D1Ar1\n", \
|
||||
"3: ADD %2,%2,#1\n" \
|
||||
" SETB [%0++],D1Ar1\n", \
|
||||
"3: ADD %2,%2,#1\n", \
|
||||
" .long 2b,3b\n")
|
||||
|
||||
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
|
||||
__asm_copy_user_cont(to, from, ret, \
|
||||
" GETW D1Ar1,[%1++]\n" \
|
||||
"2: SETW [%0++],D1Ar1\n" COPY, \
|
||||
"3: ADD %2,%2,#2\n" \
|
||||
" SETW [%0++],D1Ar1\n" FIXUP, \
|
||||
"3: ADD %2,%2,#2\n" FIXUP, \
|
||||
" .long 2b,3b\n" TENTRY)
|
||||
|
||||
#define __asm_copy_from_user_2(to, from, ret) \
|
||||
|
@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
|
|||
__asm_copy_from_user_2x_cont(to, from, ret, \
|
||||
" GETB D1Ar1,[%1++]\n" \
|
||||
"4: SETB [%0++],D1Ar1\n", \
|
||||
"5: ADD %2,%2,#1\n" \
|
||||
" SETB [%0++],D1Ar1\n", \
|
||||
"5: ADD %2,%2,#1\n", \
|
||||
" .long 4b,5b\n")
|
||||
|
||||
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
|
||||
__asm_copy_user_cont(to, from, ret, \
|
||||
" GETD D1Ar1,[%1++]\n" \
|
||||
"2: SETD [%0++],D1Ar1\n" COPY, \
|
||||
"3: ADD %2,%2,#4\n" \
|
||||
" SETD [%0++],D1Ar1\n" FIXUP, \
|
||||
"3: ADD %2,%2,#4\n" FIXUP, \
|
||||
" .long 2b,3b\n" TENTRY)
|
||||
|
||||
#define __asm_copy_from_user_4(to, from, ret) \
|
||||
__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
|
||||
|
||||
#define __asm_copy_from_user_5(to, from, ret) \
|
||||
__asm_copy_from_user_4x_cont(to, from, ret, \
|
||||
" GETB D1Ar1,[%1++]\n" \
|
||||
"4: SETB [%0++],D1Ar1\n", \
|
||||
"5: ADD %2,%2,#1\n" \
|
||||
" SETB [%0++],D1Ar1\n", \
|
||||
" .long 4b,5b\n")
|
||||
|
||||
#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
|
||||
__asm_copy_from_user_4x_cont(to, from, ret, \
|
||||
" GETW D1Ar1,[%1++]\n" \
|
||||
"4: SETW [%0++],D1Ar1\n" COPY, \
|
||||
"5: ADD %2,%2,#2\n" \
|
||||
" SETW [%0++],D1Ar1\n" FIXUP, \
|
||||
" .long 4b,5b\n" TENTRY)
|
||||
|
||||
#define __asm_copy_from_user_6(to, from, ret) \
|
||||
__asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
|
||||
|
||||
#define __asm_copy_from_user_7(to, from, ret) \
|
||||
__asm_copy_from_user_6x_cont(to, from, ret, \
|
||||
" GETB D1Ar1,[%1++]\n" \
|
||||
"6: SETB [%0++],D1Ar1\n", \
|
||||
"7: ADD %2,%2,#1\n" \
|
||||
" SETB [%0++],D1Ar1\n", \
|
||||
" .long 6b,7b\n")
|
||||
|
||||
#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
|
||||
__asm_copy_from_user_4x_cont(to, from, ret, \
|
||||
" GETD D1Ar1,[%1++]\n" \
|
||||
"4: SETD [%0++],D1Ar1\n" COPY, \
|
||||
"5: ADD %2,%2,#4\n" \
|
||||
" SETD [%0++],D1Ar1\n" FIXUP, \
|
||||
" .long 4b,5b\n" TENTRY)
|
||||
|
||||
#define __asm_copy_from_user_8(to, from, ret) \
|
||||
__asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
|
||||
|
||||
#define __asm_copy_from_user_9(to, from, ret) \
|
||||
__asm_copy_from_user_8x_cont(to, from, ret, \
|
||||
" GETB D1Ar1,[%1++]\n" \
|
||||
"6: SETB [%0++],D1Ar1\n", \
|
||||
"7: ADD %2,%2,#1\n" \
|
||||
" SETB [%0++],D1Ar1\n", \
|
||||
" .long 6b,7b\n")
|
||||
|
||||
#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
|
||||
__asm_copy_from_user_8x_cont(to, from, ret, \
|
||||
" GETW D1Ar1,[%1++]\n" \
|
||||
"6: SETW [%0++],D1Ar1\n" COPY, \
|
||||
"7: ADD %2,%2,#2\n" \
|
||||
" SETW [%0++],D1Ar1\n" FIXUP, \
|
||||
" .long 6b,7b\n" TENTRY)
|
||||
|
||||
#define __asm_copy_from_user_10(to, from, ret) \
|
||||
__asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
|
||||
|
||||
#define __asm_copy_from_user_11(to, from, ret) \
|
||||
__asm_copy_from_user_10x_cont(to, from, ret, \
|
||||
" GETB D1Ar1,[%1++]\n" \
|
||||
"8: SETB [%0++],D1Ar1\n", \
|
||||
"9: ADD %2,%2,#1\n" \
|
||||
" SETB [%0++],D1Ar1\n", \
|
||||
" .long 8b,9b\n")
|
||||
|
||||
#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
|
||||
__asm_copy_from_user_8x_cont(to, from, ret, \
|
||||
" GETD D1Ar1,[%1++]\n" \
|
||||
"6: SETD [%0++],D1Ar1\n" COPY, \
|
||||
"7: ADD %2,%2,#4\n" \
|
||||
" SETD [%0++],D1Ar1\n" FIXUP, \
|
||||
" .long 6b,7b\n" TENTRY)
|
||||
|
||||
#define __asm_copy_from_user_12(to, from, ret) \
|
||||
__asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
|
||||
|
||||
#define __asm_copy_from_user_13(to, from, ret) \
|
||||
__asm_copy_from_user_12x_cont(to, from, ret, \
|
||||
" GETB D1Ar1,[%1++]\n" \
|
||||
"8: SETB [%0++],D1Ar1\n", \
|
||||
"9: ADD %2,%2,#1\n" \
|
||||
" SETB [%0++],D1Ar1\n", \
|
||||
" .long 8b,9b\n")
|
||||
|
||||
#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
|
||||
__asm_copy_from_user_12x_cont(to, from, ret, \
|
||||
" GETW D1Ar1,[%1++]\n" \
|
||||
"8: SETW [%0++],D1Ar1\n" COPY, \
|
||||
"9: ADD %2,%2,#2\n" \
|
||||
" SETW [%0++],D1Ar1\n" FIXUP, \
|
||||
" .long 8b,9b\n" TENTRY)
|
||||
|
||||
#define __asm_copy_from_user_14(to, from, ret) \
|
||||
__asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
|
||||
|
||||
#define __asm_copy_from_user_15(to, from, ret) \
|
||||
__asm_copy_from_user_14x_cont(to, from, ret, \
|
||||
" GETB D1Ar1,[%1++]\n" \
|
||||
"10: SETB [%0++],D1Ar1\n", \
|
||||
"11: ADD %2,%2,#1\n" \
|
||||
" SETB [%0++],D1Ar1\n", \
|
||||
" .long 10b,11b\n")
|
||||
|
||||
#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
|
||||
__asm_copy_from_user_12x_cont(to, from, ret, \
|
||||
" GETD D1Ar1,[%1++]\n" \
|
||||
"8: SETD [%0++],D1Ar1\n" COPY, \
|
||||
"9: ADD %2,%2,#4\n" \
|
||||
" SETD [%0++],D1Ar1\n" FIXUP, \
|
||||
" .long 8b,9b\n" TENTRY)
|
||||
|
||||
#define __asm_copy_from_user_16(to, from, ret) \
|
||||
__asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
|
||||
|
||||
#define __asm_copy_from_user_8x64(to, from, ret) \
|
||||
asm volatile ( \
|
||||
" GETL D0Ar2,D1Ar1,[%1++]\n" \
|
||||
"2: SETL [%0++],D0Ar2,D1Ar1\n" \
|
||||
"1:\n" \
|
||||
" .section .fixup,\"ax\"\n" \
|
||||
" MOV D1Ar1,#0\n" \
|
||||
" MOV D0Ar2,#0\n" \
|
||||
"3: ADD %2,%2,#8\n" \
|
||||
" SETL [%0++],D0Ar2,D1Ar1\n" \
|
||||
" MOVT D0Ar2,#HI(1b)\n" \
|
||||
" JUMP D0Ar2,#LO(1b)\n" \
|
||||
" .previous\n" \
|
||||
|
@@ -789,35 +711,56 @@ EXPORT_SYMBOL(__copy_user);
 *
 * Rationale:
 *	A fault occurs while reading from user buffer, which is the
 *	source. Since the fault is at a single address, we only
 *	need to rewind by 8 bytes.
 *	source.
 *	Since we don't write to kernel buffer until we read first,
 *	the kernel buffer is at the right state and needn't be
 *	corrected.
 *	corrected, but the source must be rewound to the beginning of
 *	the block, which is LSM_STEP*8 bytes.
 *	LSM_STEP is bits 10:8 in TXSTATUS which is already read
 *	and stored in D0Ar2
 *
 *	NOTE: If a fault occurs at the last operation in M{G,S}ETL
 *	LSM_STEP will be 0. ie: we do 4 writes in our case, if
 *	a fault happens at the 4th write, LSM_STEP will be 0
 *	instead of 4. The code copes with that.
 */
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
		"SUB %1, %1, #8\n")
		"LSR D0Ar2, D0Ar2, #5\n" \
		"ANDS D0Ar2, D0Ar2, #0x38\n" \
		"ADDZ D0Ar2, D0Ar2, #32\n" \
		"SUB %1, %1, D0Ar2\n")

/* rewind 'from' pointer when a fault occurs
 *
 * Rationale:
 *	A fault occurs while reading from user buffer, which is the
 *	source. Since the fault is at a single address, we only
 *	need to rewind by 4 bytes.
 *	source.
 *	Since we don't write to kernel buffer until we read first,
 *	the kernel buffer is at the right state and needn't be
 *	corrected.
 *	corrected, but the source must be rewound to the beginning of
 *	the block, which is LSM_STEP*4 bytes.
 *	LSM_STEP is bits 10:8 in TXSTATUS which is already read
 *	and stored in D0Ar2
 *
 *	NOTE: If a fault occurs at the last operation in M{G,S}ETL
 *	LSM_STEP will be 0. ie: we do 4 writes in our case, if
 *	a fault happens at the 4th write, LSM_STEP will be 0
 *	instead of 4. The code copes with that.
 */
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
		"SUB %1, %1, #4\n")
		"LSR D0Ar2, D0Ar2, #6\n" \
		"ANDS D0Ar2, D0Ar2, #0x1c\n" \
		"ADDZ D0Ar2, D0Ar2, #16\n" \
		"SUB %1, %1, D0Ar2\n")

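The LSM_STEP arithmetic above is easy to check in plain C. The following is my own illustration (not kernel code): it extracts bits 10:8 of a TXSTATUS value, treats 0 as a full block of 4 transfers, and scales by the transfer width, which is what the LSR/ANDS/ADDZ sequence computes.

#include <assert.h>
#include <stdio.h>

/* Rewind distance in bytes for a faulting M{G,S}ETL/M{G,S}ETD block copy.
 * width is 8 for the 64-bit loop (LSM_STEP*8) and 4 for the 32-bit loop. */
static unsigned long rewind_bytes(unsigned long txstatus, unsigned long width)
{
	unsigned long step = (txstatus >> 8) & 0x7;	/* LSM_STEP, bits 10:8 */

	if (step == 0)		/* a fault at the last transfer reads back as 0 */
		step = 4;
	return step * width;
}

int main(void)
{
	/* The 64-bit asm computes (TXSTATUS >> 5) & 0x38, i.e. LSM_STEP * 8,
	 * and ADDZ turns a zero result into 32 (= 4 * 8). */
	assert(rewind_bytes(0x200, 8) == ((0x200 >> 5) & 0x38));
	assert(rewind_bytes(0x000, 8) == 32);
	printf("step=2 -> %lu bytes, step=0 -> %lu bytes\n",
	       rewind_bytes(0x200, 8), rewind_bytes(0x000, 8));
	return 0;
}
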
/* Copy from user to kernel, zeroing the bytes that were inaccessible in
   userland. The return-value is the number of bytes that were
   inaccessible. */
unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
/*
 * Copy from user to kernel. The return-value is the number of bytes that were
 * inaccessible.
 */
unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
				 unsigned long n)
{
	register char *dst asm ("A0.2") = pdst;

|
@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
|
|||
if ((unsigned long) src & 1) {
|
||||
__asm_copy_from_user_1(dst, src, retn);
|
||||
n--;
|
||||
if (retn)
|
||||
return retn + n;
|
||||
}
|
||||
if ((unsigned long) dst & 1) {
|
||||
/* Worst case - byte copy */
|
||||
|
@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
|
|||
__asm_copy_from_user_1(dst, src, retn);
|
||||
n--;
|
||||
if (retn)
|
||||
goto copy_exception_bytes;
|
||||
return retn + n;
|
||||
}
|
||||
}
|
||||
if (((unsigned long) src & 2) && n >= 2) {
|
||||
__asm_copy_from_user_2(dst, src, retn);
|
||||
n -= 2;
|
||||
if (retn)
|
||||
return retn + n;
|
||||
}
|
||||
if ((unsigned long) dst & 2) {
|
||||
/* Second worst case - word copy */
|
||||
|
@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
|
|||
__asm_copy_from_user_2(dst, src, retn);
|
||||
n -= 2;
|
||||
if (retn)
|
||||
goto copy_exception_bytes;
|
||||
return retn + n;
|
||||
}
|
||||
}
|
||||
|
||||
/* We only need one check after the unalignment-adjustments,
|
||||
because if both adjustments were done, either both or
|
||||
neither reference had an exception. */
|
||||
if (retn != 0)
|
||||
goto copy_exception_bytes;
|
||||
|
||||
#ifdef USE_RAPF
|
||||
/* 64 bit copy loop */
|
||||
if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
|
||||
|
@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
|
|||
__asm_copy_from_user_8x64(dst, src, retn);
|
||||
n -= 8;
|
||||
if (retn)
|
||||
goto copy_exception_bytes;
|
||||
return retn + n;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
|
|||
__asm_copy_from_user_8x64(dst, src, retn);
|
||||
n -= 8;
|
||||
if (retn)
|
||||
goto copy_exception_bytes;
|
||||
return retn + n;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
|
|||
n -= 4;
|
||||
|
||||
if (retn)
|
||||
goto copy_exception_bytes;
|
||||
return retn + n;
|
||||
}
|
||||
|
||||
/* If we get here, there were no memory read faults. */
|
||||
|
@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
|
|||
/* If we get here, retn correctly reflects the number of failing
|
||||
bytes. */
|
||||
return retn;
|
||||
|
||||
copy_exception_bytes:
|
||||
/* We already have "retn" bytes cleared, and need to clear the
|
||||
remaining "n" bytes. A non-optimized simple byte-for-byte in-line
|
||||
memset is preferred here, since this isn't speed-critical code and
|
||||
we'd rather have this a leaf-function than calling memset. */
|
||||
{
|
||||
char *endp;
|
||||
for (endp = dst + n; dst < endp; dst++)
|
||||
*dst = 0;
|
||||
}
|
||||
|
||||
return retn + n;
|
||||
}
|
||||
EXPORT_SYMBOL(__copy_user_zeroing);
|
||||
EXPORT_SYMBOL(raw_copy_from_user);
|
||||
|
||||
#define __asm_clear_8x64(to, ret) \
|
||||
asm volatile ( \
|
||||
|
|
|
@ -1531,7 +1531,7 @@ config CPU_MIPS64_R6
|
|||
select CPU_SUPPORTS_HIGHMEM
|
||||
select CPU_SUPPORTS_MSA
|
||||
select GENERIC_CSUM
|
||||
select MIPS_O32_FP64_SUPPORT if MIPS32_O32
|
||||
select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
|
||||
select HAVE_KVM
|
||||
help
|
||||
Choose this option to build a kernel for release 6 or later of the
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include <asm/cpu-features.h>
|
||||
#include <asm/fpu_emulator.h>
|
||||
#include <asm/hazards.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/current.h>
|
||||
#include <asm/msa.h>
|
||||
|
|
|
@@ -18,9 +18,24 @@
#include <irq.h>

#define IRQ_STACK_SIZE			THREAD_SIZE
#define IRQ_STACK_START			(IRQ_STACK_SIZE - sizeof(unsigned long))

extern void *irq_stack[NR_CPUS];

/*
 * The highest address on the IRQ stack contains a dummy frame put down in
 * genex.S (handle_int & except_vec_vi_handler) which is structured as follows:
 *
 *   top ------------
 *       | task sp  | <- irq_stack[cpu] + IRQ_STACK_START
 *       ------------
 *       |          | <- First frame of IRQ context
 *       ------------
 *
 * task sp holds a copy of the task stack pointer where the struct pt_regs
 * from exception entry can be found.
 */

static inline bool on_irq_stack(int cpu, unsigned long sp)
{
	unsigned long low = (unsigned long)irq_stack[cpu];

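To make the layout comment concrete, here is a small illustrative bounds check in the same spirit as on_irq_stack() (the function body is cut off at the hunk boundary above, so this is my sketch of the idea, not the kernel's exact code): a stack pointer is "on" CPU n's IRQ stack when it falls inside [irq_stack[n], irq_stack[n] + IRQ_STACK_SIZE]. The sizes and the fake_stack array are stand-ins for the demonstration.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS		4
#define IRQ_STACK_SIZE	(16 * 1024)	/* stands in for THREAD_SIZE */
#define IRQ_STACK_START	(IRQ_STACK_SIZE - sizeof(unsigned long))

static void *irq_stack[NR_CPUS];	/* base of each per-CPU IRQ stack */

/* Illustrative version of the bounds test an unwinder can rely on. */
static bool sketch_on_irq_stack(int cpu, unsigned long sp)
{
	unsigned long low = (unsigned long)irq_stack[cpu];
	unsigned long high = low + IRQ_STACK_SIZE;

	return low && sp >= low && sp <= high;
}

int main(void)
{
	static unsigned long fake_stack[IRQ_STACK_SIZE / sizeof(long)];
	unsigned long dummy;

	irq_stack[0] = fake_stack;
	/* The dummy frame described above sits at base + IRQ_STACK_START. */
	dummy = (unsigned long)fake_stack + IRQ_STACK_START;

	printf("%d %d\n", sketch_on_irq_stack(0, dummy),
	       sketch_on_irq_stack(1, dummy));	/* prints: 1 0 */
	return 0;
}
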
@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
|
|||
" andi %[ticket], %[ticket], 0xffff \n"
|
||||
" bne %[ticket], %[my_ticket], 4f \n"
|
||||
" subu %[ticket], %[my_ticket], %[ticket] \n"
|
||||
"2: \n"
|
||||
"2: .insn \n"
|
||||
" .subsection 2 \n"
|
||||
"4: andi %[ticket], %[ticket], 0xffff \n"
|
||||
" sll %[ticket], 5 \n"
|
||||
|
@ -202,7 +202,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
|
|||
" sc %[ticket], %[ticket_ptr] \n"
|
||||
" beqz %[ticket], 1b \n"
|
||||
" li %[ticket], 1 \n"
|
||||
"2: \n"
|
||||
"2: .insn \n"
|
||||
" .subsection 2 \n"
|
||||
"3: b 2b \n"
|
||||
" li %[ticket], 0 \n"
|
||||
|
@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
|
|||
" .set reorder \n"
|
||||
__WEAK_LLSC_MB
|
||||
" li %2, 1 \n"
|
||||
"2: \n"
|
||||
"2: .insn \n"
|
||||
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
|
||||
: GCC_OFF_SMALL_ASM() (rw->lock)
|
||||
: "memory");
|
||||
|
@ -422,7 +422,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
|
|||
" lui %1, 0x8000 \n"
|
||||
" sc %1, %0 \n"
|
||||
" li %2, 1 \n"
|
||||
"2: \n"
|
||||
"2: .insn \n"
|
||||
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
|
||||
"=&r" (ret)
|
||||
: GCC_OFF_SMALL_ASM() (rw->lock)
|
||||
|
|
|
@ -386,17 +386,18 @@
|
|||
#define __NR_pkey_mprotect (__NR_Linux + 363)
|
||||
#define __NR_pkey_alloc (__NR_Linux + 364)
|
||||
#define __NR_pkey_free (__NR_Linux + 365)
|
||||
#define __NR_statx (__NR_Linux + 366)
|
||||
|
||||
|
||||
/*
|
||||
* Offset of the last Linux o32 flavoured syscall
|
||||
*/
|
||||
#define __NR_Linux_syscalls 365
|
||||
#define __NR_Linux_syscalls 366
|
||||
|
||||
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
|
||||
|
||||
#define __NR_O32_Linux 4000
|
||||
#define __NR_O32_Linux_syscalls 365
|
||||
#define __NR_O32_Linux_syscalls 366
|
||||
|
||||
#if _MIPS_SIM == _MIPS_SIM_ABI64
|
||||
|
||||
|
@ -730,16 +731,17 @@
|
|||
#define __NR_pkey_mprotect (__NR_Linux + 323)
|
||||
#define __NR_pkey_alloc (__NR_Linux + 324)
|
||||
#define __NR_pkey_free (__NR_Linux + 325)
|
||||
#define __NR_statx (__NR_Linux + 326)
|
||||
|
||||
/*
|
||||
* Offset of the last Linux 64-bit flavoured syscall
|
||||
*/
|
||||
#define __NR_Linux_syscalls 325
|
||||
#define __NR_Linux_syscalls 326
|
||||
|
||||
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
|
||||
|
||||
#define __NR_64_Linux 5000
|
||||
#define __NR_64_Linux_syscalls 325
|
||||
#define __NR_64_Linux_syscalls 326
|
||||
|
||||
#if _MIPS_SIM == _MIPS_SIM_NABI32
|
||||
|
||||
|
@ -1077,15 +1079,16 @@
|
|||
#define __NR_pkey_mprotect (__NR_Linux + 327)
|
||||
#define __NR_pkey_alloc (__NR_Linux + 328)
|
||||
#define __NR_pkey_free (__NR_Linux + 329)
|
||||
#define __NR_statx (__NR_Linux + 330)
|
||||
|
||||
/*
|
||||
* Offset of the last N32 flavoured syscall
|
||||
*/
|
||||
#define __NR_Linux_syscalls 329
|
||||
#define __NR_Linux_syscalls 330
|
||||
|
||||
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
|
||||
|
||||
#define __NR_N32_Linux 6000
|
||||
#define __NR_N32_Linux_syscalls 329
|
||||
#define __NR_N32_Linux_syscalls 330
|
||||
|
||||
#endif /* _UAPI_ASM_UNISTD_H */
|
||||
|
|
|
@ -102,6 +102,7 @@ void output_thread_info_defines(void)
|
|||
DEFINE(_THREAD_SIZE, THREAD_SIZE);
|
||||
DEFINE(_THREAD_MASK, THREAD_MASK);
|
||||
DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
|
||||
DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
|
||||
BLANK();
|
||||
}
|
||||
|
||||
|
|
|
@ -361,7 +361,7 @@ LEAF(mips_cps_get_bootcfg)
|
|||
END(mips_cps_get_bootcfg)
|
||||
|
||||
LEAF(mips_cps_boot_vpes)
|
||||
PTR_L ta2, COREBOOTCFG_VPEMASK(a0)
|
||||
lw ta2, COREBOOTCFG_VPEMASK(a0)
|
||||
PTR_L ta3, COREBOOTCFG_VPECONFIG(a0)
|
||||
|
||||
#if defined(CONFIG_CPU_MIPSR6)
|
||||
|
|
|
@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
|
|||
}
|
||||
|
||||
decode_configs(c);
|
||||
c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
|
||||
c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
|
||||
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -215,9 +215,11 @@ NESTED(handle_int, PT_SIZE, sp)
|
|||
beq t0, t1, 2f
|
||||
|
||||
/* Switch to IRQ stack */
|
||||
li t1, _IRQ_STACK_SIZE
|
||||
li t1, _IRQ_STACK_START
|
||||
PTR_ADD sp, t0, t1
|
||||
|
||||
/* Save task's sp on IRQ stack so that unwinding can follow it */
|
||||
LONG_S s1, 0(sp)
|
||||
2:
|
||||
jal plat_irq_dispatch
|
||||
|
||||
|
@ -325,9 +327,11 @@ NESTED(except_vec_vi_handler, 0, sp)
|
|||
beq t0, t1, 2f
|
||||
|
||||
/* Switch to IRQ stack */
|
||||
li t1, _IRQ_STACK_SIZE
|
||||
li t1, _IRQ_STACK_START
|
||||
PTR_ADD sp, t0, t1
|
||||
|
||||
/* Save task's sp on IRQ stack so that unwinding can follow it */
|
||||
LONG_S s1, 0(sp)
|
||||
2:
|
||||
jalr v0
|
||||
|
||||
|
@ -519,7 +523,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
|
|||
BUILD_HANDLER reserved reserved sti verbose /* others */
|
||||
|
||||
.align 5
|
||||
LEAF(handle_ri_rdhwr_vivt)
|
||||
LEAF(handle_ri_rdhwr_tlbp)
|
||||
.set push
|
||||
.set noat
|
||||
.set noreorder
|
||||
|
@ -538,7 +542,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
|
|||
.set pop
|
||||
bltz k1, handle_ri /* slow path */
|
||||
/* fall thru */
|
||||
END(handle_ri_rdhwr_vivt)
|
||||
END(handle_ri_rdhwr_tlbp)
|
||||
|
||||
LEAF(handle_ri_rdhwr)
|
||||
.set push
|
||||
|
|
|
@ -488,32 +488,53 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
|
|||
unsigned long pc,
|
||||
unsigned long *ra)
|
||||
{
|
||||
unsigned long low, high, irq_stack_high;
|
||||
struct mips_frame_info info;
|
||||
unsigned long size, ofs;
|
||||
struct pt_regs *regs;
|
||||
int leaf;
|
||||
extern void ret_from_irq(void);
|
||||
extern void ret_from_exception(void);
|
||||
|
||||
if (!stack_page)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* If we reached the bottom of interrupt context,
|
||||
* return saved pc in pt_regs.
|
||||
* IRQ stacks start at IRQ_STACK_START
|
||||
* task stacks at THREAD_SIZE - 32
|
||||
*/
|
||||
if (pc == (unsigned long)ret_from_irq ||
|
||||
pc == (unsigned long)ret_from_exception) {
|
||||
struct pt_regs *regs;
|
||||
if (*sp >= stack_page &&
|
||||
*sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
|
||||
regs = (struct pt_regs *)*sp;
|
||||
low = stack_page;
|
||||
if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
|
||||
high = stack_page + IRQ_STACK_START;
|
||||
irq_stack_high = high;
|
||||
} else {
|
||||
high = stack_page + THREAD_SIZE - 32;
|
||||
irq_stack_high = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we reached the top of the interrupt stack, start unwinding
|
||||
* the interrupted task stack.
|
||||
*/
|
||||
if (unlikely(*sp == irq_stack_high)) {
|
||||
unsigned long task_sp = *(unsigned long *)*sp;
|
||||
|
||||
/*
|
||||
* Check that the pointer saved in the IRQ stack head points to
|
||||
* something within the stack of the current task
|
||||
*/
|
||||
if (!object_is_on_stack((void *)task_sp))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Follow pointer to tasks kernel stack frame where interrupted
|
||||
* state was saved.
|
||||
*/
|
||||
regs = (struct pt_regs *)task_sp;
|
||||
pc = regs->cp0_epc;
|
||||
if (!user_mode(regs) && __kernel_text_address(pc)) {
|
||||
*sp = regs->regs[29];
|
||||
*ra = regs->regs[31];
|
||||
return pc;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
|
||||
|
@ -533,8 +554,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
|
|||
if (leaf < 0)
|
||||
return 0;
|
||||
|
||||
if (*sp < stack_page ||
|
||||
*sp + info.frame_size > stack_page + THREAD_SIZE - 32)
|
||||
if (*sp < low || *sp + info.frame_size > high)
|
||||
return 0;
|
||||
|
||||
if (leaf)
|
||||
|
|
|
@ -456,7 +456,8 @@ static int fpr_set(struct task_struct *target,
|
|||
&target->thread.fpu,
|
||||
0, sizeof(elf_fpregset_t));
|
||||
|
||||
for (i = 0; i < NUM_FPU_REGS; i++) {
|
||||
BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
|
||||
for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
|
||||
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
&fpr_val, i * sizeof(elf_fpreg_t),
|
||||
(i + 1) * sizeof(elf_fpreg_t));
|
||||
|
|
|
@ -600,3 +600,4 @@ EXPORT(sys_call_table)
|
|||
PTR sys_pkey_mprotect
|
||||
PTR sys_pkey_alloc
|
||||
PTR sys_pkey_free /* 4365 */
|
||||
PTR sys_statx
|
||||
|
|
|
@ -438,4 +438,5 @@ EXPORT(sys_call_table)
|
|||
PTR sys_pkey_mprotect
|
||||
PTR sys_pkey_alloc
|
||||
PTR sys_pkey_free /* 5325 */
|
||||
PTR sys_statx
|
||||
.size sys_call_table,.-sys_call_table
|
||||
|
|
|
@ -433,4 +433,5 @@ EXPORT(sysn32_call_table)
|
|||
PTR sys_pkey_mprotect
|
||||
PTR sys_pkey_alloc
|
||||
PTR sys_pkey_free
|
||||
PTR sys_statx /* 6330 */
|
||||
.size sysn32_call_table,.-sysn32_call_table
|
||||
|
|
|
@ -588,4 +588,5 @@ EXPORT(sys32_call_table)
|
|||
PTR sys_pkey_mprotect
|
||||
PTR sys_pkey_alloc
|
||||
PTR sys_pkey_free /* 4365 */
|
||||
PTR sys_statx
|
||||
.size sys32_call_table,.-sys32_call_table
|
||||
|
|
|
@ -83,7 +83,7 @@ extern asmlinkage void handle_dbe(void);
|
|||
extern asmlinkage void handle_sys(void);
|
||||
extern asmlinkage void handle_bp(void);
|
||||
extern asmlinkage void handle_ri(void);
|
||||
extern asmlinkage void handle_ri_rdhwr_vivt(void);
|
||||
extern asmlinkage void handle_ri_rdhwr_tlbp(void);
|
||||
extern asmlinkage void handle_ri_rdhwr(void);
|
||||
extern asmlinkage void handle_cpu(void);
|
||||
extern asmlinkage void handle_ov(void);
|
||||
|
@ -2408,9 +2408,18 @@ void __init trap_init(void)
|
|||
|
||||
set_except_vector(EXCCODE_SYS, handle_sys);
|
||||
set_except_vector(EXCCODE_BP, handle_bp);
|
||||
set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
|
||||
(cpu_has_vtag_icache ?
|
||||
handle_ri_rdhwr_vivt : handle_ri_rdhwr));
|
||||
|
||||
if (rdhwr_noopt)
|
||||
set_except_vector(EXCCODE_RI, handle_ri);
|
||||
else {
|
||||
if (cpu_has_vtag_icache)
|
||||
set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
|
||||
else if (current_cpu_type() == CPU_LOONGSON3)
|
||||
set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
|
||||
else
|
||||
set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
|
||||
}
|
||||
|
||||
set_except_vector(EXCCODE_CPU, handle_cpu);
|
||||
set_except_vector(EXCCODE_OV, handle_ov);
|
||||
set_except_vector(EXCCODE_TR, handle_tr);
|
||||
|
|
|
@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
|
|||
|
||||
if (!np_xbar)
|
||||
panic("Failed to load xbar nodes from devicetree");
|
||||
if (of_address_to_resource(np_pmu, 0, &res_xbar))
|
||||
if (of_address_to_resource(np_xbar, 0, &res_xbar))
|
||||
panic("Failed to get xbar resources");
|
||||
if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
|
||||
res_xbar.name))
|
||||
|
|
|
@ -1562,6 +1562,7 @@ static void probe_vcache(void)
|
|||
vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
|
||||
|
||||
c->vcache.waybit = 0;
|
||||
c->vcache.waysize = vcache_size / c->vcache.ways;
|
||||
|
||||
pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
|
||||
vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
|
||||
|
@ -1664,6 +1665,7 @@ static void __init loongson3_sc_init(void)
|
|||
/* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
|
||||
scache_size *= 4;
|
||||
c->scache.waybit = 0;
|
||||
c->scache.waysize = scache_size / c->scache.ways;
|
||||
pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
|
||||
scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
|
||||
if (scache_size)
|
||||
|
|
|
@ -760,7 +760,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
|
|||
static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
|
||||
struct uasm_label **l,
|
||||
unsigned int pte,
|
||||
unsigned int ptr)
|
||||
unsigned int ptr,
|
||||
unsigned int flush)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
UASM_i_SC(p, pte, 0, ptr);
|
||||
|
@ -769,6 +770,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
|
|||
#else
|
||||
UASM_i_SW(p, pte, 0, ptr);
|
||||
#endif
|
||||
if (cpu_has_ftlb && flush) {
|
||||
BUG_ON(!cpu_has_tlbinv);
|
||||
|
||||
UASM_i_MFC0(p, ptr, C0_ENTRYHI);
|
||||
uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
|
||||
UASM_i_MTC0(p, ptr, C0_ENTRYHI);
|
||||
build_tlb_write_entry(p, l, r, tlb_indexed);
|
||||
|
||||
uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
|
||||
UASM_i_MTC0(p, ptr, C0_ENTRYHI);
|
||||
build_huge_update_entries(p, pte, ptr);
|
||||
build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
build_huge_update_entries(p, pte, ptr);
|
||||
build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
|
||||
}
|
||||
|
@ -2199,7 +2216,7 @@ static void build_r4000_tlb_load_handler(void)
|
|||
uasm_l_tlbl_goaround2(&l, p);
|
||||
}
|
||||
uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
|
||||
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
|
||||
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
|
||||
#endif
|
||||
|
||||
uasm_l_nopage_tlbl(&l, p);
|
||||
|
@ -2254,7 +2271,7 @@ static void build_r4000_tlb_store_handler(void)
|
|||
build_tlb_probe_entry(&p);
|
||||
uasm_i_ori(&p, wr.r1, wr.r1,
|
||||
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
|
||||
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
|
||||
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
|
||||
#endif
|
||||
|
||||
uasm_l_nopage_tlbs(&l, p);
|
||||
|
@ -2310,7 +2327,7 @@ static void build_r4000_tlb_modify_handler(void)
|
|||
build_tlb_probe_entry(&p);
|
||||
uasm_i_ori(&p, wr.r1, wr.r1,
|
||||
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
|
||||
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
|
||||
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
|
||||
#endif
|
||||
|
||||
uasm_l_nopage_tlbm(&l, p);
|
||||
|
|
|
@@ -35,7 +35,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
+static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
static struct rt2880_pmx_func pci_func[] = {
FUNC("pci-dev", 0, 40, 32),
FUNC("pci-host2", 1, 40, 32),

@@ -43,7 +43,7 @@ static struct rt2880_pmx_func pci_func[] = {
FUNC("pci-fnc", 3, 40, 32)
};
static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
+static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };

static struct rt2880_pmx_group rt3883_pinmux_data[] = {
GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
return alloc_bootmem_align(size, align);
}

+int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
+bool nomap)
+{
+reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+return 0;
+}
+
void __init early_init_devtree(void *params)
{
__be32 *dtb = (u32 *)__dtb_start;
@@ -201,6 +201,9 @@ void __init setup_arch(char **cmdline_p)
}
#endif /* CONFIG_BLK_DEV_INITRD */

+early_init_fdt_reserve_self();
+early_init_fdt_scan_reserved_mem();
+
unflatten_and_copy_device_tree();

setup_cpuinfo();
@@ -64,6 +64,15 @@ struct exception_table_entry {
".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
".previous\n"

+/*
+* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+* (with lowest bit set) for which the fault handler in fixup_exception() will
+* load -EFAULT into %r8 for a read or write fault, and zeroes the target
+* register in case of a read fault in get_user().
+*/
+#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
+ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
+
/*
* The page fault handler stores, in a per-cpu area, the following information
* if a fixup routine is available.

@@ -91,7 +100,7 @@ struct exception_data {
#define __get_user(x, ptr) \
({ \
register long __gu_err __asm__ ("r8") = 0; \
-register long __gu_val __asm__ ("r9") = 0; \
+register long __gu_val; \
\
load_sr2(); \
switch (sizeof(*(ptr))) { \
@@ -107,22 +116,23 @@ struct exception_data {
})

#define __get_user_asm(ldx, ptr) \
-__asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \
-ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
+__asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
+"9:\n" \
+ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
-: "r"(ptr), "1"(__gu_err) \
-: "r1");
+: "r"(ptr), "1"(__gu_err));

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(ptr) \
-__asm__("\n1:\tldw 0(%%sr2,%2),%0" \
-"\n2:\tldw 4(%%sr2,%2),%R0\n\t" \
-ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
-ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
+__asm__(" copy %%r0,%R0\n" \
+"1: ldw 0(%%sr2,%2),%0\n" \
+"2: ldw 4(%%sr2,%2),%R0\n" \
+"9:\n" \
+ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
-: "r"(ptr), "1"(__gu_err) \
-: "r1");
+: "r"(ptr), "1"(__gu_err));

#endif /* !defined(CONFIG_64BIT) */
@ -148,32 +158,31 @@ struct exception_data {
|
|||
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
|
||||
* instead of writing. This is because they do not write to any memory
|
||||
* gcc knows about, so there are no aliasing issues. These macros must
|
||||
* also be aware that "fixup_put_user_skip_[12]" are executed in the
|
||||
* context of the fault, and any registers used there must be listed
|
||||
* as clobbers. In this case only "r1" is used by the current routines.
|
||||
* r8/r9 are already listed as err/val.
|
||||
* also be aware that fixups are executed in the context of the fault,
|
||||
* and any registers used there must be listed as clobbers.
|
||||
* r8 is already listed as err.
|
||||
*/
|
||||
|
||||
#define __put_user_asm(stx, x, ptr) \
|
||||
__asm__ __volatile__ ( \
|
||||
"\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
|
||||
"1: " stx " %2,0(%%sr2,%1)\n" \
|
||||
"9:\n" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
|
||||
: "=r"(__pu_err) \
|
||||
: "r"(ptr), "r"(x), "0"(__pu_err) \
|
||||
: "r1")
|
||||
: "r"(ptr), "r"(x), "0"(__pu_err))
|
||||
|
||||
|
||||
#if !defined(CONFIG_64BIT)
|
||||
|
||||
#define __put_user_asm64(__val, ptr) do { \
|
||||
__asm__ __volatile__ ( \
|
||||
"\n1:\tstw %2,0(%%sr2,%1)" \
|
||||
"\n2:\tstw %R2,4(%%sr2,%1)\n\t" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
|
||||
ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
|
||||
"1: stw %2,0(%%sr2,%1)\n" \
|
||||
"2: stw %R2,4(%%sr2,%1)\n" \
|
||||
"9:\n" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
|
||||
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
|
||||
: "=r"(__pu_err) \
|
||||
: "r"(ptr), "r"(__val), "0"(__pu_err) \
|
||||
: "r1"); \
|
||||
: "r"(ptr), "r"(__val), "0"(__pu_err)); \
|
||||
} while (0)
|
||||
|
||||
#endif /* !defined(CONFIG_64BIT) */
|
||||
|
|
|
@@ -47,16 +47,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
EXPORT_SYMBOL(lclear_user);
EXPORT_SYMBOL(lstrnlen_user);

-/* Global fixups - defined as int to avoid creation of function pointers */
-extern int fixup_get_user_skip_1;
-extern int fixup_get_user_skip_2;
-extern int fixup_put_user_skip_1;
-extern int fixup_put_user_skip_2;
-EXPORT_SYMBOL(fixup_get_user_skip_1);
-EXPORT_SYMBOL(fixup_get_user_skip_2);
-EXPORT_SYMBOL(fixup_put_user_skip_1);
-EXPORT_SYMBOL(fixup_put_user_skip_2);
-
#ifndef CONFIG_64BIT
/* Needed so insmod can set dp value */
extern int $global$;
@@ -143,6 +143,8 @@ void machine_power_off(void)
printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");

+/* prevent soft lockup/stalled CPU messages for endless loop. */
+rcu_sysrq_start();
for (;;);
}

@@ -2,7 +2,7 @@
# Makefile for parisc-specific library files
#

-lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
+lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
ucmpdi2.o delay.o

obj-y := iomap.o
@ -1,98 +0,0 @@
|
|||
/*
|
||||
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
|
||||
*
|
||||
* Copyright (C) 2004 Randolph Chung <tausq@debian.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*
|
||||
* Fixup routines for kernel exception handling.
|
||||
*/
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/assembly.h>
|
||||
#include <asm/errno.h>
|
||||
#include <linux/linkage.h>
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
.macro get_fault_ip t1 t2
|
||||
loadgp
|
||||
addil LT%__per_cpu_offset,%r27
|
||||
LDREG RT%__per_cpu_offset(%r1),\t1
|
||||
/* t2 = smp_processor_id() */
|
||||
mfctl 30,\t2
|
||||
ldw TI_CPU(\t2),\t2
|
||||
#ifdef CONFIG_64BIT
|
||||
extrd,u \t2,63,32,\t2
|
||||
#endif
|
||||
/* t2 = &__per_cpu_offset[smp_processor_id()]; */
|
||||
LDREGX \t2(\t1),\t2
|
||||
addil LT%exception_data,%r27
|
||||
LDREG RT%exception_data(%r1),\t1
|
||||
/* t1 = this_cpu_ptr(&exception_data) */
|
||||
add,l \t1,\t2,\t1
|
||||
/* %r27 = t1->fault_gp - restore gp */
|
||||
LDREG EXCDATA_GP(\t1), %r27
|
||||
/* t1 = t1->fault_ip */
|
||||
LDREG EXCDATA_IP(\t1), \t1
|
||||
.endm
|
||||
#else
|
||||
.macro get_fault_ip t1 t2
|
||||
loadgp
|
||||
/* t1 = this_cpu_ptr(&exception_data) */
|
||||
addil LT%exception_data,%r27
|
||||
LDREG RT%exception_data(%r1),\t2
|
||||
/* %r27 = t2->fault_gp - restore gp */
|
||||
LDREG EXCDATA_GP(\t2), %r27
|
||||
/* t1 = t2->fault_ip */
|
||||
LDREG EXCDATA_IP(\t2), \t1
|
||||
.endm
|
||||
#endif
|
||||
|
||||
.level LEVEL
|
||||
|
||||
.text
|
||||
.section .fixup, "ax"
|
||||
|
||||
/* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
|
||||
ENTRY_CFI(fixup_get_user_skip_1)
|
||||
get_fault_ip %r1,%r8
|
||||
ldo 4(%r1), %r1
|
||||
ldi -EFAULT, %r8
|
||||
bv %r0(%r1)
|
||||
copy %r0, %r9
|
||||
ENDPROC_CFI(fixup_get_user_skip_1)
|
||||
|
||||
ENTRY_CFI(fixup_get_user_skip_2)
|
||||
get_fault_ip %r1,%r8
|
||||
ldo 8(%r1), %r1
|
||||
ldi -EFAULT, %r8
|
||||
bv %r0(%r1)
|
||||
copy %r0, %r9
|
||||
ENDPROC_CFI(fixup_get_user_skip_2)
|
||||
|
||||
/* put_user() fixups, store -EFAULT in r8 */
|
||||
ENTRY_CFI(fixup_put_user_skip_1)
|
||||
get_fault_ip %r1,%r8
|
||||
ldo 4(%r1), %r1
|
||||
bv %r0(%r1)
|
||||
ldi -EFAULT, %r8
|
||||
ENDPROC_CFI(fixup_put_user_skip_1)
|
||||
|
||||
ENTRY_CFI(fixup_put_user_skip_2)
|
||||
get_fault_ip %r1,%r8
|
||||
ldo 8(%r1), %r1
|
||||
bv %r0(%r1)
|
||||
ldi -EFAULT, %r8
|
||||
ENDPROC_CFI(fixup_put_user_skip_2)
|
||||
|
|
@ -5,6 +5,8 @@
|
|||
* Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
|
||||
* Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
|
||||
* Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
|
||||
* Copyright (C) 2017 Helge Deller <deller@gmx.de>
|
||||
* Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
|
@ -132,4 +134,320 @@ ENDPROC_CFI(lstrnlen_user)
|
|||
|
||||
.procend
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
|
||||
*
|
||||
* Inputs:
|
||||
* - sr1 already contains space of source region
|
||||
* - sr2 already contains space of destination region
|
||||
*
|
||||
* Returns:
|
||||
* - number of bytes that could not be copied.
|
||||
* On success, this will be zero.
|
||||
*
|
||||
* This code is based on a C-implementation of a copy routine written by
|
||||
* Randolph Chung, which in turn was derived from the glibc.
|
||||
*
|
||||
* Several strategies are tried to try to get the best performance for various
|
||||
* conditions. In the optimal case, we copy by loops that copy 32- or 16-bytes
|
||||
* at a time using general registers. Unaligned copies are handled either by
|
||||
* aligning the destination and then using shift-and-write method, or in a few
|
||||
* cases by falling back to a byte-at-a-time copy.
|
||||
*
|
||||
* Testing with various alignments and buffer sizes shows that this code is
|
||||
* often >10x faster than a simple byte-at-a-time copy, even for strangely
|
||||
* aligned operands. It is interesting to note that the glibc version of memcpy
|
||||
* (written in C) is actually quite fast already. This routine is able to beat
|
||||
* it by 30-40% for aligned copies because of the loop unrolling, but in some
|
||||
* cases the glibc version is still slightly faster. This lends more
|
||||
* credibility that gcc can generate very good code as long as we are careful.
|
||||
*
|
||||
* Possible optimizations:
|
||||
* - add cache prefetching
|
||||
* - try not to use the post-increment address modifiers; they may create
|
||||
* additional interlocks. Assumption is that those were only efficient on old
|
||||
* machines (pre PA8000 processors)
|
||||
*/
|
||||
|
||||
dst = arg0
|
||||
src = arg1
|
||||
len = arg2
|
||||
end = arg3
|
||||
t1 = r19
|
||||
t2 = r20
|
||||
t3 = r21
|
||||
t4 = r22
|
||||
srcspc = sr1
|
||||
dstspc = sr2
|
||||
|
||||
t0 = r1
|
||||
a1 = t1
|
||||
a2 = t2
|
||||
a3 = t3
|
||||
a0 = t4
|
||||
|
||||
save_src = ret0
|
||||
save_dst = ret1
|
||||
save_len = r31
|
||||
|
||||
ENTRY_CFI(pa_memcpy)
|
||||
.proc
|
||||
.callinfo NO_CALLS
|
||||
.entry
|
||||
|
||||
/* Last destination address */
|
||||
add dst,len,end
|
||||
|
||||
/* short copy with less than 16 bytes? */
|
||||
cmpib,>>=,n 15,len,.Lbyte_loop
|
||||
|
||||
/* same alignment? */
|
||||
xor src,dst,t0
|
||||
extru t0,31,2,t1
|
||||
cmpib,<>,n 0,t1,.Lunaligned_copy
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
/* only do 64-bit copies if we can get aligned. */
|
||||
extru t0,31,3,t1
|
||||
cmpib,<>,n 0,t1,.Lalign_loop32
|
||||
|
||||
/* loop until we are 64-bit aligned */
|
||||
.Lalign_loop64:
|
||||
extru dst,31,3,t1
|
||||
cmpib,=,n 0,t1,.Lcopy_loop_16
|
||||
20: ldb,ma 1(srcspc,src),t1
|
||||
21: stb,ma t1,1(dstspc,dst)
|
||||
b .Lalign_loop64
|
||||
ldo -1(len),len
|
||||
|
||||
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
|
||||
|
||||
ldi 31,t0
|
||||
.Lcopy_loop_16:
|
||||
cmpb,COND(>>=),n t0,len,.Lword_loop
|
||||
|
||||
10: ldd 0(srcspc,src),t1
|
||||
11: ldd 8(srcspc,src),t2
|
||||
ldo 16(src),src
|
||||
12: std,ma t1,8(dstspc,dst)
|
||||
13: std,ma t2,8(dstspc,dst)
|
||||
14: ldd 0(srcspc,src),t1
|
||||
15: ldd 8(srcspc,src),t2
|
||||
ldo 16(src),src
|
||||
16: std,ma t1,8(dstspc,dst)
|
||||
17: std,ma t2,8(dstspc,dst)
|
||||
|
||||
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
|
||||
|
||||
b .Lcopy_loop_16
|
||||
ldo -32(len),len
|
||||
|
||||
.Lword_loop:
|
||||
cmpib,COND(>>=),n 3,len,.Lbyte_loop
|
||||
20: ldw,ma 4(srcspc,src),t1
|
||||
21: stw,ma t1,4(dstspc,dst)
|
||||
b .Lword_loop
|
||||
ldo -4(len),len
|
||||
|
||||
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
|
||||
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
/* loop until we are 32-bit aligned */
|
||||
.Lalign_loop32:
|
||||
extru dst,31,2,t1
|
||||
cmpib,=,n 0,t1,.Lcopy_loop_4
|
||||
20: ldb,ma 1(srcspc,src),t1
|
||||
21: stb,ma t1,1(dstspc,dst)
|
||||
b .Lalign_loop32
|
||||
ldo -1(len),len
|
||||
|
||||
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
|
||||
|
||||
|
||||
.Lcopy_loop_4:
|
||||
cmpib,COND(>>=),n 15,len,.Lbyte_loop
|
||||
|
||||
10: ldw 0(srcspc,src),t1
|
||||
11: ldw 4(srcspc,src),t2
|
||||
12: stw,ma t1,4(dstspc,dst)
|
||||
13: stw,ma t2,4(dstspc,dst)
|
||||
14: ldw 8(srcspc,src),t1
|
||||
15: ldw 12(srcspc,src),t2
|
||||
ldo 16(src),src
|
||||
16: stw,ma t1,4(dstspc,dst)
|
||||
17: stw,ma t2,4(dstspc,dst)
|
||||
|
||||
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
|
||||
|
||||
b .Lcopy_loop_4
|
||||
ldo -16(len),len
|
||||
|
||||
.Lbyte_loop:
|
||||
cmpclr,COND(<>) len,%r0,%r0
|
||||
b,n .Lcopy_done
|
||||
20: ldb 0(srcspc,src),t1
|
||||
ldo 1(src),src
|
||||
21: stb,ma t1,1(dstspc,dst)
|
||||
b .Lbyte_loop
|
||||
ldo -1(len),len
|
||||
|
||||
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
|
||||
|
||||
.Lcopy_done:
|
||||
bv %r0(%r2)
|
||||
sub end,dst,ret0
|
||||
|
||||
|
||||
/* src and dst are not aligned the same way. */
|
||||
/* need to go the hard way */
|
||||
.Lunaligned_copy:
|
||||
/* align until dst is 32bit-word-aligned */
|
||||
extru dst,31,2,t1
|
||||
cmpib,COND(=),n 0,t1,.Lcopy_dstaligned
|
||||
20: ldb 0(srcspc,src),t1
|
||||
ldo 1(src),src
|
||||
21: stb,ma t1,1(dstspc,dst)
|
||||
b .Lunaligned_copy
|
||||
ldo -1(len),len
|
||||
|
||||
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
|
||||
|
||||
.Lcopy_dstaligned:
|
||||
|
||||
/* store src, dst and len in safe place */
|
||||
copy src,save_src
|
||||
copy dst,save_dst
|
||||
copy len,save_len
|
||||
|
||||
/* len now needs give number of words to copy */
|
||||
SHRREG len,2,len
|
||||
|
||||
/*
|
||||
* Copy from a not-aligned src to an aligned dst using shifts.
|
||||
* Handles 4 words per loop.
|
||||
*/
|
||||
|
||||
depw,z src,28,2,t0
|
||||
subi 32,t0,t0
|
||||
mtsar t0
|
||||
extru len,31,2,t0
|
||||
cmpib,= 2,t0,.Lcase2
|
||||
/* Make src aligned by rounding it down. */
|
||||
depi 0,31,2,src
|
||||
|
||||
cmpiclr,<> 3,t0,%r0
|
||||
b,n .Lcase3
|
||||
cmpiclr,<> 1,t0,%r0
|
||||
b,n .Lcase1
|
||||
.Lcase0:
|
||||
cmpb,= %r0,len,.Lcda_finish
|
||||
nop
|
||||
|
||||
1: ldw,ma 4(srcspc,src), a3
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
|
||||
1: ldw,ma 4(srcspc,src), a0
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
|
||||
b,n .Ldo3
|
||||
.Lcase1:
|
||||
1: ldw,ma 4(srcspc,src), a2
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
|
||||
1: ldw,ma 4(srcspc,src), a3
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
|
||||
ldo -1(len),len
|
||||
cmpb,=,n %r0,len,.Ldo0
|
||||
.Ldo4:
|
||||
1: ldw,ma 4(srcspc,src), a0
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
|
||||
shrpw a2, a3, %sar, t0
|
||||
1: stw,ma t0, 4(dstspc,dst)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
|
||||
.Ldo3:
|
||||
1: ldw,ma 4(srcspc,src), a1
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
|
||||
shrpw a3, a0, %sar, t0
|
||||
1: stw,ma t0, 4(dstspc,dst)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
|
||||
.Ldo2:
|
||||
1: ldw,ma 4(srcspc,src), a2
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
|
||||
shrpw a0, a1, %sar, t0
|
||||
1: stw,ma t0, 4(dstspc,dst)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
|
||||
.Ldo1:
|
||||
1: ldw,ma 4(srcspc,src), a3
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
|
||||
shrpw a1, a2, %sar, t0
|
||||
1: stw,ma t0, 4(dstspc,dst)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
|
||||
ldo -4(len),len
|
||||
cmpb,<> %r0,len,.Ldo4
|
||||
nop
|
||||
.Ldo0:
|
||||
shrpw a2, a3, %sar, t0
|
||||
1: stw,ma t0, 4(dstspc,dst)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
|
||||
|
||||
.Lcda_rdfault:
|
||||
.Lcda_finish:
|
||||
/* calculate new src, dst and len and jump to byte-copy loop */
|
||||
sub dst,save_dst,t0
|
||||
add save_src,t0,src
|
||||
b .Lbyte_loop
|
||||
sub save_len,t0,len
|
||||
|
||||
.Lcase3:
|
||||
1: ldw,ma 4(srcspc,src), a0
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
|
||||
1: ldw,ma 4(srcspc,src), a1
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
|
||||
b .Ldo2
|
||||
ldo 1(len),len
|
||||
.Lcase2:
|
||||
1: ldw,ma 4(srcspc,src), a1
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
|
||||
1: ldw,ma 4(srcspc,src), a2
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
|
||||
b .Ldo1
|
||||
ldo 2(len),len
|
||||
|
||||
|
||||
/* fault exception fixup handlers: */
|
||||
#ifdef CONFIG_64BIT
|
||||
.Lcopy16_fault:
|
||||
10: b .Lcopy_done
|
||||
std,ma t1,8(dstspc,dst)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
|
||||
#endif
|
||||
|
||||
.Lcopy8_fault:
|
||||
10: b .Lcopy_done
|
||||
stw,ma t1,4(dstspc,dst)
|
||||
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
|
||||
|
||||
.exit
|
||||
ENDPROC_CFI(pa_memcpy)
|
||||
.procend
|
||||
|
||||
.end
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
* Optimized memory copy routines.
|
||||
*
|
||||
* Copyright (C) 2004 Randolph Chung <tausq@debian.org>
|
||||
* Copyright (C) 2013 Helge Deller <deller@gmx.de>
|
||||
* Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
|
@ -21,474 +21,21 @@
|
|||
* Portions derived from the GNU C Library
|
||||
* Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
|
||||
*
|
||||
* Several strategies are tried to try to get the best performance for various
|
||||
* conditions. In the optimal case, we copy 64-bytes in an unrolled loop using
|
||||
* fp regs. This is followed by loops that copy 32- or 16-bytes at a time using
|
||||
* general registers. Unaligned copies are handled either by aligning the
|
||||
* destination and then using shift-and-write method, or in a few cases by
|
||||
* falling back to a byte-at-a-time copy.
|
||||
*
|
||||
* I chose to implement this in C because it is easier to maintain and debug,
|
||||
* and in my experiments it appears that the C code generated by gcc (3.3/3.4
|
||||
* at the time of writing) is fairly optimal. Unfortunately some of the
|
||||
* semantics of the copy routine (exception handling) is difficult to express
|
||||
* in C, so we have to play some tricks to get it to work.
|
||||
*
|
||||
* All the loads and stores are done via explicit asm() code in order to use
|
||||
* the right space registers.
|
||||
*
|
||||
* Testing with various alignments and buffer sizes shows that this code is
|
||||
* often >10x faster than a simple byte-at-a-time copy, even for strangely
|
||||
* aligned operands. It is interesting to note that the glibc version
|
||||
* of memcpy (written in C) is actually quite fast already. This routine is
|
||||
* able to beat it by 30-40% for aligned copies because of the loop unrolling,
|
||||
* but in some cases the glibc version is still slightly faster. This lends
|
||||
* more credibility that gcc can generate very good code as long as we are
|
||||
* careful.
|
||||
*
|
||||
* TODO:
|
||||
* - cache prefetching needs more experimentation to get optimal settings
|
||||
* - try not to use the post-increment address modifiers; they create additional
|
||||
* interlocks
|
||||
* - replace byte-copy loops with stybs sequences
|
||||
*/
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#include <linux/module.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/uaccess.h>
|
||||
#define s_space "%%sr1"
|
||||
#define d_space "%%sr2"
|
||||
#else
|
||||
#include "memcpy.h"
|
||||
#define s_space "%%sr0"
|
||||
#define d_space "%%sr0"
|
||||
#define pa_memcpy new2_copy
|
||||
#endif
|
||||
|
||||
DECLARE_PER_CPU(struct exception_data, exception_data);
|
||||
|
||||
#define preserve_branch(label) do { \
|
||||
volatile int dummy = 0; \
|
||||
/* The following branch is never taken, it's just here to */ \
|
||||
/* prevent gcc from optimizing away our exception code. */ \
|
||||
if (unlikely(dummy != dummy)) \
|
||||
goto label; \
|
||||
} while (0)
|
||||
|
||||
#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
|
||||
#define get_kernel_space() (0)
|
||||
|
||||
#define MERGE(w0, sh_1, w1, sh_2) ({ \
|
||||
unsigned int _r; \
|
||||
asm volatile ( \
|
||||
"mtsar %3\n" \
|
||||
"shrpw %1, %2, %%sar, %0\n" \
|
||||
: "=r"(_r) \
|
||||
: "r"(w0), "r"(w1), "r"(sh_2) \
|
||||
); \
|
||||
_r; \
|
||||
})
|
||||
#define THRESHOLD 16
|
||||
|
||||
#ifdef DEBUG_MEMCPY
|
||||
#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
|
||||
#else
|
||||
#define DPRINTF(fmt, args...)
|
||||
#endif
|
||||
|
||||
#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
|
||||
__asm__ __volatile__ ( \
|
||||
"1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
|
||||
: _tt(_t), "+r"(_a) \
|
||||
: \
|
||||
: "r8")
|
||||
|
||||
#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
|
||||
__asm__ __volatile__ ( \
|
||||
"1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
|
||||
: "+r"(_a) \
|
||||
: _tt(_t) \
|
||||
: "r8")
|
||||
|
||||
#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
|
||||
#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
|
||||
#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
|
||||
#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
|
||||
#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
|
||||
#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)
|
||||
|
||||
#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \
|
||||
__asm__ __volatile__ ( \
|
||||
"1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
|
||||
: _tt(_t) \
|
||||
: "r"(_a) \
|
||||
: "r8")
|
||||
|
||||
#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \
|
||||
__asm__ __volatile__ ( \
|
||||
"1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
|
||||
: \
|
||||
: _tt(_t), "r"(_a) \
|
||||
: "r8")
|
||||
|
||||
#define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
|
||||
#define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e)
|
||||
|
||||
#ifdef CONFIG_PREFETCH
|
||||
static inline void prefetch_src(const void *addr)
|
||||
{
|
||||
__asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
|
||||
}
|
||||
|
||||
static inline void prefetch_dst(const void *addr)
|
||||
{
|
||||
__asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
|
||||
}
|
||||
#else
|
||||
#define prefetch_src(addr) do { } while(0)
|
||||
#define prefetch_dst(addr) do { } while(0)
|
||||
#endif
|
||||
|
||||
#define PA_MEMCPY_OK 0
|
||||
#define PA_MEMCPY_LOAD_ERROR 1
|
||||
#define PA_MEMCPY_STORE_ERROR 2
|
||||
|
||||
/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
|
||||
* per loop. This code is derived from glibc.
|
||||
*/
|
||||
static noinline unsigned long copy_dstaligned(unsigned long dst,
|
||||
unsigned long src, unsigned long len)
|
||||
{
|
||||
/* gcc complains that a2 and a3 may be uninitialized, but actually
|
||||
* they cannot be. Initialize a2/a3 to shut gcc up.
|
||||
*/
|
||||
register unsigned int a0, a1, a2 = 0, a3 = 0;
|
||||
int sh_1, sh_2;
|
||||
|
||||
/* prefetch_src((const void *)src); */
|
||||
|
||||
/* Calculate how to shift a word read at the memory operation
|
||||
aligned srcp to make it aligned for copy. */
|
||||
sh_1 = 8 * (src % sizeof(unsigned int));
|
||||
sh_2 = 8 * sizeof(unsigned int) - sh_1;
|
||||
|
||||
/* Make src aligned by rounding it down. */
|
||||
src &= -sizeof(unsigned int);
|
||||
|
||||
switch (len % 4)
|
||||
{
|
||||
case 2:
|
||||
/* a1 = ((unsigned int *) src)[0];
|
||||
a2 = ((unsigned int *) src)[1]; */
|
||||
ldw(s_space, 0, src, a1, cda_ldw_exc);
|
||||
ldw(s_space, 4, src, a2, cda_ldw_exc);
|
||||
src -= 1 * sizeof(unsigned int);
|
||||
dst -= 3 * sizeof(unsigned int);
|
||||
len += 2;
|
||||
goto do1;
|
||||
case 3:
|
||||
/* a0 = ((unsigned int *) src)[0];
|
||||
a1 = ((unsigned int *) src)[1]; */
|
||||
ldw(s_space, 0, src, a0, cda_ldw_exc);
|
||||
ldw(s_space, 4, src, a1, cda_ldw_exc);
|
||||
src -= 0 * sizeof(unsigned int);
|
||||
dst -= 2 * sizeof(unsigned int);
|
||||
len += 1;
|
||||
goto do2;
|
||||
case 0:
|
||||
if (len == 0)
|
||||
return PA_MEMCPY_OK;
|
||||
/* a3 = ((unsigned int *) src)[0];
|
||||
a0 = ((unsigned int *) src)[1]; */
|
||||
ldw(s_space, 0, src, a3, cda_ldw_exc);
|
||||
ldw(s_space, 4, src, a0, cda_ldw_exc);
|
||||
src -=-1 * sizeof(unsigned int);
|
||||
dst -= 1 * sizeof(unsigned int);
|
||||
len += 0;
|
||||
goto do3;
|
||||
case 1:
|
||||
/* a2 = ((unsigned int *) src)[0];
|
||||
a3 = ((unsigned int *) src)[1]; */
|
||||
ldw(s_space, 0, src, a2, cda_ldw_exc);
|
||||
ldw(s_space, 4, src, a3, cda_ldw_exc);
|
||||
src -=-2 * sizeof(unsigned int);
|
||||
dst -= 0 * sizeof(unsigned int);
|
||||
len -= 1;
|
||||
if (len == 0)
|
||||
goto do0;
|
||||
goto do4; /* No-op. */
|
||||
}
|
||||
|
||||
do
|
||||
{
|
||||
/* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
|
||||
do4:
|
||||
/* a0 = ((unsigned int *) src)[0]; */
|
||||
ldw(s_space, 0, src, a0, cda_ldw_exc);
|
||||
/* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
|
||||
stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
|
||||
do3:
|
||||
/* a1 = ((unsigned int *) src)[1]; */
|
||||
ldw(s_space, 4, src, a1, cda_ldw_exc);
|
||||
/* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
|
||||
stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
|
||||
do2:
|
||||
/* a2 = ((unsigned int *) src)[2]; */
|
||||
ldw(s_space, 8, src, a2, cda_ldw_exc);
|
||||
/* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
|
||||
stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
|
||||
do1:
|
||||
/* a3 = ((unsigned int *) src)[3]; */
|
||||
ldw(s_space, 12, src, a3, cda_ldw_exc);
|
||||
/* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
|
||||
stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);
|
||||
|
||||
src += 4 * sizeof(unsigned int);
|
||||
dst += 4 * sizeof(unsigned int);
|
||||
len -= 4;
|
||||
}
|
||||
while (len != 0);
|
||||
|
||||
do0:
|
||||
/* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
|
||||
stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
|
||||
|
||||
preserve_branch(handle_load_error);
|
||||
preserve_branch(handle_store_error);
|
||||
|
||||
return PA_MEMCPY_OK;
|
||||
|
||||
handle_load_error:
|
||||
__asm__ __volatile__ ("cda_ldw_exc:\n");
|
||||
return PA_MEMCPY_LOAD_ERROR;
|
||||
|
||||
handle_store_error:
|
||||
__asm__ __volatile__ ("cda_stw_exc:\n");
|
||||
return PA_MEMCPY_STORE_ERROR;
|
||||
}
|
||||
|
||||
|
||||
/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
|
||||
* In case of an access fault the faulty address can be read from the per_cpu
|
||||
* exception data struct. */
|
||||
static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
|
||||
unsigned long len)
|
||||
{
|
||||
register unsigned long src, dst, t1, t2, t3;
|
||||
register unsigned char *pcs, *pcd;
|
||||
register unsigned int *pws, *pwd;
|
||||
register double *pds, *pdd;
|
||||
unsigned long ret;
|
||||
|
||||
src = (unsigned long)srcp;
|
||||
dst = (unsigned long)dstp;
|
||||
pcs = (unsigned char *)srcp;
|
||||
pcd = (unsigned char *)dstp;
|
||||
|
||||
/* prefetch_src((const void *)srcp); */
|
||||
|
||||
if (len < THRESHOLD)
|
||||
goto byte_copy;
|
||||
|
||||
/* Check alignment */
|
||||
t1 = (src ^ dst);
|
||||
if (unlikely(t1 & (sizeof(double)-1)))
|
||||
goto unaligned_copy;
|
||||
|
||||
/* src and dst have same alignment. */
|
||||
|
||||
/* Copy bytes till we are double-aligned. */
|
||||
t2 = src & (sizeof(double) - 1);
|
||||
if (unlikely(t2 != 0)) {
|
||||
t2 = sizeof(double) - t2;
|
||||
while (t2 && len) {
|
||||
/* *pcd++ = *pcs++; */
|
||||
ldbma(s_space, pcs, t3, pmc_load_exc);
|
||||
len--;
|
||||
stbma(d_space, t3, pcd, pmc_store_exc);
|
||||
t2--;
|
||||
}
|
||||
}
|
||||
|
||||
pds = (double *)pcs;
|
||||
pdd = (double *)pcd;
|
||||
|
||||
#if 0
|
||||
/* Copy 8 doubles at a time */
|
||||
while (len >= 8*sizeof(double)) {
|
||||
register double r1, r2, r3, r4, r5, r6, r7, r8;
|
||||
/* prefetch_src((char *)pds + L1_CACHE_BYTES); */
|
||||
flddma(s_space, pds, r1, pmc_load_exc);
|
||||
flddma(s_space, pds, r2, pmc_load_exc);
|
||||
flddma(s_space, pds, r3, pmc_load_exc);
|
||||
flddma(s_space, pds, r4, pmc_load_exc);
|
||||
fstdma(d_space, r1, pdd, pmc_store_exc);
|
||||
fstdma(d_space, r2, pdd, pmc_store_exc);
|
||||
fstdma(d_space, r3, pdd, pmc_store_exc);
|
||||
fstdma(d_space, r4, pdd, pmc_store_exc);
|
||||
|
||||
#if 0
|
||||
if (L1_CACHE_BYTES <= 32)
|
||||
prefetch_src((char *)pds + L1_CACHE_BYTES);
|
||||
#endif
|
||||
flddma(s_space, pds, r5, pmc_load_exc);
|
||||
flddma(s_space, pds, r6, pmc_load_exc);
|
||||
flddma(s_space, pds, r7, pmc_load_exc);
|
||||
flddma(s_space, pds, r8, pmc_load_exc);
|
||||
fstdma(d_space, r5, pdd, pmc_store_exc);
|
||||
fstdma(d_space, r6, pdd, pmc_store_exc);
|
||||
fstdma(d_space, r7, pdd, pmc_store_exc);
|
||||
fstdma(d_space, r8, pdd, pmc_store_exc);
|
||||
len -= 8*sizeof(double);
|
||||
}
|
||||
#endif
|
||||
|
||||
pws = (unsigned int *)pds;
|
||||
pwd = (unsigned int *)pdd;
|
||||
|
||||
word_copy:
|
||||
while (len >= 8*sizeof(unsigned int)) {
|
||||
register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
|
||||
/* prefetch_src((char *)pws + L1_CACHE_BYTES); */
|
||||
ldwma(s_space, pws, r1, pmc_load_exc);
|
||||
ldwma(s_space, pws, r2, pmc_load_exc);
|
||||
ldwma(s_space, pws, r3, pmc_load_exc);
|
||||
ldwma(s_space, pws, r4, pmc_load_exc);
|
||||
stwma(d_space, r1, pwd, pmc_store_exc);
|
||||
stwma(d_space, r2, pwd, pmc_store_exc);
|
||||
stwma(d_space, r3, pwd, pmc_store_exc);
|
||||
stwma(d_space, r4, pwd, pmc_store_exc);
|
||||
|
||||
ldwma(s_space, pws, r5, pmc_load_exc);
|
||||
ldwma(s_space, pws, r6, pmc_load_exc);
|
||||
ldwma(s_space, pws, r7, pmc_load_exc);
|
||||
ldwma(s_space, pws, r8, pmc_load_exc);
|
||||
stwma(d_space, r5, pwd, pmc_store_exc);
|
||||
stwma(d_space, r6, pwd, pmc_store_exc);
|
||||
stwma(d_space, r7, pwd, pmc_store_exc);
|
||||
stwma(d_space, r8, pwd, pmc_store_exc);
|
||||
len -= 8*sizeof(unsigned int);
|
||||
}
|
||||
|
||||
while (len >= 4*sizeof(unsigned int)) {
|
||||
register unsigned int r1,r2,r3,r4;
|
||||
ldwma(s_space, pws, r1, pmc_load_exc);
|
||||
ldwma(s_space, pws, r2, pmc_load_exc);
|
||||
ldwma(s_space, pws, r3, pmc_load_exc);
|
||||
ldwma(s_space, pws, r4, pmc_load_exc);
|
||||
stwma(d_space, r1, pwd, pmc_store_exc);
|
||||
stwma(d_space, r2, pwd, pmc_store_exc);
|
||||
stwma(d_space, r3, pwd, pmc_store_exc);
|
||||
stwma(d_space, r4, pwd, pmc_store_exc);
|
||||
len -= 4*sizeof(unsigned int);
|
||||
}
|
||||
|
||||
pcs = (unsigned char *)pws;
|
||||
pcd = (unsigned char *)pwd;
|
||||
|
||||
byte_copy:
|
||||
while (len) {
|
||||
/* *pcd++ = *pcs++; */
|
||||
ldbma(s_space, pcs, t3, pmc_load_exc);
|
||||
stbma(d_space, t3, pcd, pmc_store_exc);
|
||||
len--;
|
||||
}
|
||||
|
||||
return PA_MEMCPY_OK;
|
||||
|
||||
unaligned_copy:
|
||||
/* possibly we are aligned on a word, but not on a double... */
|
||||
if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
|
||||
t2 = src & (sizeof(unsigned int) - 1);
|
||||
|
||||
if (unlikely(t2 != 0)) {
|
||||
t2 = sizeof(unsigned int) - t2;
|
||||
while (t2) {
|
||||
/* *pcd++ = *pcs++; */
|
||||
ldbma(s_space, pcs, t3, pmc_load_exc);
|
||||
stbma(d_space, t3, pcd, pmc_store_exc);
|
||||
len--;
|
||||
t2--;
|
||||
}
|
||||
}
|
||||
|
||||
pws = (unsigned int *)pcs;
|
||||
pwd = (unsigned int *)pcd;
|
||||
goto word_copy;
|
||||
}
|
||||
|
||||
/* Align the destination. */
|
||||
if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
|
||||
t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
|
||||
while (t2) {
|
||||
/* *pcd++ = *pcs++; */
|
||||
ldbma(s_space, pcs, t3, pmc_load_exc);
|
||||
stbma(d_space, t3, pcd, pmc_store_exc);
|
||||
len--;
|
||||
t2--;
|
||||
}
|
||||
dst = (unsigned long)pcd;
|
||||
src = (unsigned long)pcs;
|
||||
}
|
||||
|
||||
ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
pcs += (len & -sizeof(unsigned int));
|
||||
pcd += (len & -sizeof(unsigned int));
|
||||
len %= sizeof(unsigned int);
|
||||
|
||||
preserve_branch(handle_load_error);
|
||||
preserve_branch(handle_store_error);
|
||||
|
||||
goto byte_copy;
|
||||
|
||||
handle_load_error:
|
||||
__asm__ __volatile__ ("pmc_load_exc:\n");
|
||||
return PA_MEMCPY_LOAD_ERROR;
|
||||
|
||||
handle_store_error:
|
||||
__asm__ __volatile__ ("pmc_store_exc:\n");
|
||||
return PA_MEMCPY_STORE_ERROR;
|
||||
}
|
||||
|
||||
|
||||
/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
|
||||
static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
|
||||
{
|
||||
unsigned long ret, fault_addr, reference;
|
||||
struct exception_data *d;
|
||||
extern unsigned long pa_memcpy(void *dst, const void *src,
|
||||
unsigned long len);
|
||||
|
||||
ret = pa_memcpy_internal(dstp, srcp, len);
|
||||
if (likely(ret == PA_MEMCPY_OK))
|
||||
return 0;
|
||||
|
||||
/* if a load or store fault occured we can get the faulty addr */
|
||||
d = this_cpu_ptr(&exception_data);
|
||||
fault_addr = d->fault_addr;
|
||||
|
||||
/* error in load or store? */
|
||||
if (ret == PA_MEMCPY_LOAD_ERROR)
|
||||
reference = (unsigned long) srcp;
|
||||
else
|
||||
reference = (unsigned long) dstp;
|
||||
|
||||
DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
|
||||
ret, len, fault_addr, reference);
|
||||
|
||||
if (fault_addr >= reference)
|
||||
return len - (fault_addr - reference);
|
||||
else
|
||||
return len;
|
||||
}
|
||||
|
||||
#ifdef __KERNEL__
|
||||
unsigned long __copy_to_user(void __user *dst, const void *src,
|
||||
unsigned long len)
|
||||
{
|
||||
|
@ -537,5 +84,3 @@ long probe_kernel_read(void *dst, const void *src, size_t size)
|
|||
|
||||
return __probe_kernel_read(dst, src, size);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -150,6 +150,23 @@ int fixup_exception(struct pt_regs *regs)
d->fault_space = regs->isr;
d->fault_addr = regs->ior;

+/*
+* Fix up get_user() and put_user().
+* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
+* bit in the relative address of the fixup routine to indicate
+* that %r8 should be loaded with -EFAULT to report a userspace
+* access error.
+*/
+if (fix->fixup & 1) {
+regs->gr[8] = -EFAULT;
+
+/* zero target register for get_user() */
+if (parisc_acctyp(0, regs->iir) == VM_READ) {
+int treg = regs->iir & 0x1f;
+regs->gr[treg] = 0;
+}
+}
+
regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
regs->iaoq[0] &= ~3;
/*
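For orientation, here is a minimal user-space sketch of the tagging scheme the comment above describes: the relative fixup offset borrows its low bit as an "also report -EFAULT" flag, which the handler tests and then masks off before branching. The helper is hypothetical and only mirrors the idea, not the real struct exception_table_entry handling or register state.

#include <stdio.h>

#define EFAULT 14	/* same numeric value as the kernel errno */

/* fixup is a byte offset; bit 0 was set by ASM_EXCEPTIONTABLE_ENTRY_EFAULT (+1) */
static long decode_fixup(long fixup, long *r8)
{
	if (fixup & 1)
		*r8 = -EFAULT;	/* what the handler loads into %r8 */
	return fixup & ~3L;	/* strip the tag bits to get the branch target */
}

int main(void)
{
	long r8 = 0;
	long target = decode_fixup(0x40 + 1, &r8);

	printf("branch to +%#lx, r8 = %ld\n", target, r8);
	return 0;
}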
@@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
}

if (len & ~VMX_ALIGN_MASK) {
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
disable_kernel_altivec();
pagefault_enable();
preempt_enable();
}

tail = len & VMX_ALIGN_MASK;
@@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;

-/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
-if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
+/*
+* Handle some cases which give overlaps in the DSISR values.
+*/
+if (IS_XFORM(instruction)) {
+switch (get_xop(instruction)) {
+case 532: /* ldbrx */
nb = 8;
flags = LD+SW;
-} else if (IS_XFORM(instruction) &&
-((instruction >> 1) & 0x3ff) == 660) {
+break;
+case 660: /* stdbrx */
nb = 8;
flags = ST+SW;
+break;
+case 20: /* lwarx */
+case 84: /* ldarx */
+case 116: /* lharx */
+case 276: /* lqarx */
+return 0; /* not emulated ever */
+}
}

/* Byteswap little endian loads and stores */
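Assuming get_xop() reduces to the usual X-form decoding (extended opcode in the XO field, i.e. (instruction >> 1) & 0x3ff, which is exactly what the removed open-coded checks computed for 532/ldbrx and 660/stdbrx), the switch above can be exercised with a small sketch like this. The instruction words below are synthetic and only populate the XO field.

#include <stdio.h>

/* X-form extended opcode: (insn >> 1) & 0x3ff */
static unsigned int xop(unsigned int instruction)
{
	return (instruction >> 1) & 0x3ff;
}

int main(void)
{
	unsigned int ldbrx_like = 532u << 1;	/* synthetic, XO = 532 */
	unsigned int stdbrx_like = 660u << 1;	/* synthetic, XO = 660 */

	printf("%u %u\n", xop(ldbrx_like), xop(stdbrx_like));	/* prints 532 660 */
	return 0;
}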
@@ -67,7 +67,7 @@ PPC64_CACHES:
* flush all bytes from start through stop-1 inclusive
*/

-_GLOBAL(flush_icache_range)
+_GLOBAL_TOC(flush_icache_range)
BEGIN_FTR_SECTION
PURGE_PREFETCHED_INS
blr

@@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
*
* flush all bytes from start to stop-1 inclusive
*/
-_GLOBAL(flush_dcache_range)
+_GLOBAL_TOC(flush_dcache_range)

/*
* Flush the data cache to memory
@@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void)
mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
}

+/*
+* Fixup HFSCR:TM based on CPU features. The bit is set by our
+* early asm init because at that point we haven't updated our
+* CPU features from firmware and device-tree. Here we have,
+* so let's do it.
+*/
+if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
+mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
+
/* Set IR and DR in PACA MSR */
get_paca()->kernel_msr = MSR_KERNEL;
}
@@ -1487,6 +1487,10 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
/* start new resize */

resize = kzalloc(sizeof(*resize), GFP_KERNEL);
+if (!resize) {
+ret = -ENOMEM;
+goto out;
+}
resize->order = shift;
resize->kvm = kvm;
INIT_WORK(&resize->work, resize_hpt_prepare_work);
@@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local)
unsigned long psize = batch->psize;
int ssize = batch->ssize;
int i;
+unsigned int use_local;
+
+use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
+mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

local_irq_save(flags);

@@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local)
} pte_iterate_hashed_end();
}

-if (mmu_has_feature(MMU_FTR_TLBIEL) &&
-mmu_psize_defs[psize].tlbiel && local) {
+if (use_local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
vpn = batch->vpn[i];
@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
|
|||
|
||||
unsigned long decompress_kernel(void)
|
||||
{
|
||||
unsigned long output_addr;
|
||||
unsigned char *output;
|
||||
void *output, *kernel_end;
|
||||
|
||||
output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
|
||||
check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
|
||||
memset(&_bss, 0, &_ebss - &_bss);
|
||||
free_mem_ptr = (unsigned long)&_end;
|
||||
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
|
||||
output = (unsigned char *) output_addr;
|
||||
output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
|
||||
kernel_end = output + SZ__bss_start;
|
||||
check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
/*
|
||||
* Move the initrd right behind the end of the decompressed
|
||||
* kernel image.
|
||||
* kernel image. This also prevents initrd corruption caused by
|
||||
* bss clearing since kernel_end will always be located behind the
|
||||
* current bss section..
|
||||
*/
|
||||
if (INITRD_START && INITRD_SIZE &&
|
||||
INITRD_START < (unsigned long) output + SZ__bss_start) {
|
||||
check_ipl_parmblock(output + SZ__bss_start,
|
||||
INITRD_START + INITRD_SIZE);
|
||||
memmove(output + SZ__bss_start,
|
||||
(void *) INITRD_START, INITRD_SIZE);
|
||||
INITRD_START = (unsigned long) output + SZ__bss_start;
|
||||
if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
|
||||
check_ipl_parmblock(kernel_end, INITRD_SIZE);
|
||||
memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
|
||||
INITRD_START = (unsigned long) kernel_end;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
|
||||
* initialized afterwards since they reside in bss.
|
||||
*/
|
||||
memset(&_bss, 0, &_ebss - &_bss);
|
||||
free_mem_ptr = (unsigned long) &_end;
|
||||
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
|
||||
|
||||
puts("Uncompressing Linux... ");
|
||||
__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
|
||||
puts("Ok, booting the kernel.\n");
|
||||
|
|
|
@@ -4,6 +4,5 @@
#include <asm-generic/sections.h>

extern char _eshared[], _ehead[];
-extern char __start_ro_after_init[], __end_ro_after_init[];

#endif
@@ -147,7 +147,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
" jg 2b\n" \
".popsection\n" \
EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
-: "=d" (__rc), "=Q" (*(to)) \
+: "=d" (__rc), "+Q" (*(to)) \
: "d" (size), "Q" (*(from)), \
"d" (__reg0), "K" (-EFAULT) \
: "cc"); \
@@ -909,13 +909,11 @@ void __init smp_prepare_boot_cpu(void)
{
struct pcpu *pcpu = pcpu_devices;

WARN_ON(!cpu_present(0) || !cpu_online(0));
pcpu->state = CPU_STATE_CONFIGURED;
pcpu->address = stap();
pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
set_cpu_present(0, true);
set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)

@@ -924,6 +922,7 @@ void __init smp_cpus_done(unsigned int max_cpus)

void __init smp_setup_processor_id(void)
{
pcpu_devices[0].address = stap();
S390_lowcore.cpu_nr = 0;
S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}
@@ -63,11 +63,9 @@ SECTIONS

. = ALIGN(PAGE_SIZE);
__start_ro_after_init = .;
__start_data_ro_after_init = .;
.data..ro_after_init : {
*(.data..ro_after_init)
}
__end_data_ro_after_init = .;
EXCEPTION_TABLE(16)
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
@@ -168,8 +168,7 @@ union page_table_entry {
unsigned long z : 1; /* Zero Bit */
unsigned long i : 1; /* Page-Invalid Bit */
unsigned long p : 1; /* DAT-Protection Bit */
-unsigned long co : 1; /* Change-Recording Override */
-unsigned long : 8;
+unsigned long : 9;
};
};

@@ -745,8 +744,6 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
return PGM_PAGE_TRANSLATION;
if (pte.z)
return PGM_TRANSLATION_SPEC;
-if (pte.co && !edat1)
-return PGM_TRANSLATION_SPEC;
dat_protection |= pte.p;
raddr.pfra = pte.pfra;
real_address:

@@ -1182,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
if (!rc && pte.i)
rc = PGM_PAGE_TRANSLATION;
-if (!rc && (pte.z || (pte.co && sg->edat_level < 1)))
+if (!rc && pte.z)
rc = PGM_TRANSLATION_SPEC;
shadow_page:
pte.p |= dat_protection;
@@ -17,6 +17,7 @@

#define HPAGE_SHIFT 23
#define REAL_HPAGE_SHIFT 22
+#define HPAGE_2GB_SHIFT 31
#define HPAGE_256MB_SHIFT 28
#define HPAGE_64K_SHIFT 16
#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT)

@@ -27,7 +28,7 @@
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
-#define HUGE_MAX_HSTATE 3
+#define HUGE_MAX_HSTATE 4
#endif

#ifndef __ASSEMBLY__
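The new HPAGE_2GB_SHIFT follows the existing size = 1UL << shift convention (2 to the 31st bytes is 2 GB), and HUGE_MAX_HSTATE going to 4 presumably corresponds to four supported huge page sizes (64K, the 8M default, 256M, 2G). A quick stand-alone check of that arithmetic, with the names copied only for labeling:

#include <stdio.h>

int main(void)
{
	static const struct { const char *name; int shift; } hp[] = {
		{ "HPAGE_64K_SHIFT", 16 },
		{ "HPAGE_SHIFT", 23 },
		{ "HPAGE_256MB_SHIFT", 28 },
		{ "HPAGE_2GB_SHIFT", 31 },
	};
	unsigned int i;

	/* print each size in MB: (1 << shift) bytes scaled down by 2^20 */
	for (i = 0; i < sizeof(hp) / sizeof(hp[0]); i++)
		printf("%-17s -> %4lu MB\n", hp[i].name,
		       (1UL << hp[i].shift) >> 20);
	return 0;	/* four sizes, one per HUGE_MAX_HSTATE slot */
}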
@@ -679,6 +679,14 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return pte_pfn(pte);
}

+#define __HAVE_ARCH_PMD_WRITE
+static inline unsigned long pmd_write(pmd_t pmd)
+{
+pte_t pte = __pte(pmd_val(pmd));
+
+return pte_write(pte);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_dirty(pmd_t pmd)
{

@@ -694,13 +702,6 @@ static inline unsigned long pmd_young(pmd_t pmd)
return pte_young(pte);
}

-static inline unsigned long pmd_write(pmd_t pmd)
-{
-pte_t pte = __pte(pmd_val(pmd));
-
-return pte_write(pte);
-}
-
static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
@@ -18,12 +18,6 @@
#include <asm/signal.h>
#include <asm/page.h>

-/*
-* The sparc has no problems with write protection
-*/
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
* That one page is used to protect kernel from intruders, so that
* we can make our access_ok test faster
@@ -18,10 +18,6 @@
#include <asm/ptrace.h>
#include <asm/page.h>

-/* The sparc has no problems with write protection */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
/*
* User lives in his very own context, and cannot reference us. Note
* that TASK_SIZE is a misnomer, it really gives maximum user virtual
Some files were not shown because too many files changed in this diff.