Merge branch 'for-linus' into for-next

Conflicts:
	drivers/gpu/drm/i915/intel_pm.c
Takashi Iwai 2015-12-23 08:33:34 +01:00
Parents de5126cc3c 0fb0b822d1
Commit 59c8231089
522 changed files: 4116 additions and 2546 deletions


@ -22,8 +22,7 @@ Required properties:
Optional properties:
- ti,hwmods: Name of the hwmods associated to the eDMA CC
- ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow
these channels will be SW triggered channels. The list must
contain 16 bits numbers, see example.
these channels will be SW triggered channels. See example.
- ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
the driver; they are allocated to be used by, for example, the
DSP. See example.
@ -56,10 +55,9 @@ edma: edma@49000000 {
ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>;
/* Channel 20 and 21 is allocated for memcpy */
ti,edma-memcpy-channels = /bits/ 16 <20 21>;
/* The following PaRAM slots are reserved: 35-45 and 100-110 */
ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>,
/bits/ 16 <100 10>;
ti,edma-memcpy-channels = <20 21>;
/* The following PaRAM slots are reserved: 35-44 and 100-109 */
ti,edma-reserved-slot-ranges = <35 10>, <100 10>;
};
edma_tptc0: tptc@49800000 {


@ -11,6 +11,10 @@ Required properties:
0 = active high
1 = active low
Optional properties:
- little-endian : GPIO registers are used as little endian. If not
present registers are used as big endian by default.
Example:
gpio0: gpio@1100 {


@ -12,7 +12,7 @@ Each key is represented as a sub-node of "allwinner,sun4i-a10-lradc-keys":
Required subnode-properties:
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
- channel: Channel this key is attached to, mut be 0 or 1.
- channel: Channel this key is attached to, must be 0 or 1.
- voltage: Voltage in µV at lradc input when this key is pressed.
Example:


@ -6,7 +6,9 @@ used for what purposes, but which don't use an on-flash partition table such
as RedBoot.
The partition table should be a subnode of the mtd node and should be named
'partitions'. Partitions are defined in subnodes of the partitions node.
'partitions'. This node should have the following property:
- compatible : (required) must be "fixed-partitions"
Partitions are then defined in subnodes of the partitions node.
For backwards compatibility partitions as direct subnodes of the mtd device are
supported. This use is discouraged.
@ -36,6 +38,7 @@ Examples:
flash@0 {
partitions {
compatible = "fixed-partitions";
#address-cells = <1>;
#size-cells = <1>;
@ -53,6 +56,7 @@ flash@0 {
flash@1 {
partitions {
compatible = "fixed-partitions";
#address-cells = <1>;
#size-cells = <2>;
@ -66,6 +70,7 @@ flash@1 {
flash@2 {
partitions {
compatible = "fixed-partitions";
#address-cells = <2>;
#size-cells = <2>;


@ -181,17 +181,3 @@ For general information, go to the Intel support website at:
If an issue is identified with the released source code on the supported
kernel with a supported adapter, email the specific information related to the
issue to e1000-devel@lists.sourceforge.net.
License
=======
This software program is released under the terms of a license agreement
between you ('Licensee') and Intel. Do not use or load this software or any
associated materials (collectively, the 'Software') until you have carefully
read the full terms and conditions of the file COPYING located in this software
package. By loading or using the Software, you agree to the terms of this
Agreement. If you do not agree with the terms of this Agreement, do not install
or use the Software.
* Other names and brands may be claimed as the property of others.


@ -2975,6 +2975,7 @@ F: kernel/cpuset.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
M: Vladimir Davydov <vdavydov@virtuozzo.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained
@ -5577,7 +5578,7 @@ R: Jesse Brandeburg <jesse.brandeburg@intel.com>
R: Shannon Nelson <shannon.nelson@intel.com>
R: Carolyn Wyborny <carolyn.wyborny@intel.com>
R: Don Skidmore <donald.c.skidmore@intel.com>
R: Matthew Vick <matthew.vick@intel.com>
R: Bruce Allan <bruce.w.allan@intel.com>
R: John Ronciak <john.ronciak@intel.com>
R: Mitch Williams <mitch.a.williams@intel.com>
L: intel-wired-lan@lists.osuosl.org
@ -8286,7 +8287,7 @@ F: include/linux/delayacct.h
F: kernel/delayacct.c
PERFORMANCE EVENTS SUBSYSTEM
M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@kernel.org>
L: linux-kernel@vger.kernel.org
@ -8379,6 +8380,14 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
F: drivers/pinctrl/samsung/
PIN CONTROLLER - SINGLE
M: Tony Lindgren <tony@atomide.com>
M: Haojian Zhuang <haojian.zhuang@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/pinctrl/pinctrl-single.c
PIN CONTROLLER - ST SPEAR
M: Viresh Kumar <vireshk@kernel.org>
L: spear-devel@list.st.com
@ -8945,6 +8954,13 @@ F: drivers/rpmsg/
F: Documentation/rpmsg.txt
F: include/linux/rpmsg.h
RENESAS ETHERNET DRIVERS
R: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
L: netdev@vger.kernel.org
L: linux-sh@vger.kernel.org
F: drivers/net/ethernet/renesas/
F: include/linux/sh_eth.h
RESET CONTROLLER FRAMEWORK
M: Philipp Zabel <p.zabel@pengutronix.de>
S: Maintained


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc6
NAME = Blurry Fish Butt
# *DOCUMENTATION*


@ -445,6 +445,7 @@ config LINUX_LINK_BASE
However some customers have peripherals mapped at this addr, so
Linux needs to be scooted a bit.
If you don't know what the above means, leave this setting alone.
This needs to match the memory start address specified in the Device Tree.
config HIGHMEM
bool "High Memory Support"


@ -46,6 +46,7 @@
snps,pbl = < 32 >;
clocks = <&apbclk>;
clock-names = "stmmaceth";
max-speed = <100>;
};
ehci@0x40000 {


@ -17,7 +17,8 @@
memory {
device_type = "memory";
reg = <0x0 0x80000000 0x0 0x40000000 /* 1 GB low mem */
/* CONFIG_LINUX_LINK_BASE needs to match low mem start */
reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MB low mem */
0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */
};


@ -23,7 +23,7 @@
* @dt_compat: Array of device tree 'compatible' strings
* (XXX: although only 1st entry is looked at)
* @init_early: Very early callback [called from setup_arch()]
* @init_cpu_smp: for each CPU as it is coming up (SMP as well as UP)
* @init_per_cpu: for each CPU as it is coming up (SMP as well as UP)
* [(M):init_IRQ(), (o):start_kernel_secondary()]
* @init_machine: arch initcall level callback (e.g. populate static
* platform devices or parse Devicetree)
@ -35,7 +35,7 @@ struct machine_desc {
const char **dt_compat;
void (*init_early)(void);
#ifdef CONFIG_SMP
void (*init_cpu_smp)(unsigned int);
void (*init_per_cpu)(unsigned int);
#endif
void (*init_machine)(void);
void (*init_late)(void);


@ -48,7 +48,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
* @init_early_smp: A SMP specific h/w block can init itself
* Could be common across platforms so not covered by
* mach_desc->init_early()
* @init_irq_cpu: Called for each core so SMP h/w block driver can do
* @init_per_cpu: Called for each core so SMP h/w block driver can do
* any needed setup per cpu (e.g. IPI request)
* @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
* @ipi_send: To send IPI to a @cpu
@ -57,7 +57,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
struct plat_smp_ops {
const char *info;
void (*init_early_smp)(void);
void (*init_irq_cpu)(int cpu);
void (*init_per_cpu)(int cpu);
void (*cpu_kick)(int cpu, unsigned long pc);
void (*ipi_send)(int cpu);
void (*ipi_clear)(int irq);


@ -112,7 +112,6 @@ struct unwind_frame_info {
extern int arc_unwind(struct unwind_frame_info *frame);
extern void arc_unwind_init(void);
extern void arc_unwind_setup(void);
extern void *unwind_add_table(struct module *module, const void *table_start,
unsigned long table_size);
extern void unwind_remove_table(void *handle, int init_only);
@ -152,9 +151,6 @@ static inline void arc_unwind_init(void)
{
}
static inline void arc_unwind_setup(void)
{
}
#define unwind_add_table(a, b, c)
#define unwind_remove_table(a, b)


@ -106,10 +106,21 @@ static struct irq_chip arcv2_irq_chip = {
static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
if (irq == TIMER0_IRQ || irq == IPI_IRQ)
/*
* core intc IRQs [16, 23]:
* Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
*/
if (hw < 24) {
/*
* A subsequent request_percpu_irq() fails if percpu_devid is
* not set. That in turns sets NOAUTOEN, meaning each core needs
* to call enable_percpu_irq()
*/
irq_set_percpu_devid(irq);
irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
else
} else {
irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
}
return 0;
}


@ -29,11 +29,11 @@ void __init init_IRQ(void)
#ifdef CONFIG_SMP
/* a SMP H/w block could do IPI IRQ request here */
if (plat_smp_ops.init_irq_cpu)
plat_smp_ops.init_irq_cpu(smp_processor_id());
if (plat_smp_ops.init_per_cpu)
plat_smp_ops.init_per_cpu(smp_processor_id());
if (machine_desc->init_cpu_smp)
machine_desc->init_cpu_smp(smp_processor_id());
if (machine_desc->init_per_cpu)
machine_desc->init_per_cpu(smp_processor_id());
#endif
}
@ -51,6 +51,18 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
set_irq_regs(old_regs);
}
/*
* API called for requesting percpu interrupts - called by each CPU
* - For boot CPU, actually request the IRQ with genirq core + enables
* - For subsequent callers only enable called locally
*
* Relies on being called by boot cpu first (i.e. request called ahead of
* any enable) as expected by genirq. Hence suitable only for TIMER, IPI
* which are guaranteed to be set up on boot core first.
* Late-probed peripherals such as perf can't use this as there is no
* guarantee of being called on boot CPU first.
*/
void arc_request_percpu_irq(int irq, int cpu,
irqreturn_t (*isr)(int irq, void *dev),
const char *irq_nm,
@ -60,14 +72,17 @@ void arc_request_percpu_irq(int irq, int cpu,
if (!cpu) {
int rc;
#ifdef CONFIG_ISA_ARCOMPACT
/*
* These 2 calls are essential to making percpu IRQ APIs work
* Ideally these details could be hidden in irq chip map function
* but the issue is IPIs IRQs being static (non-DT) and platform
* specific, so we can't identify them there.
* A subsequent request_percpu_irq() fails if percpu_devid is
* not set. That in turns sets NOAUTOEN, meaning each core needs
* to call enable_percpu_irq()
*
* For ARCv2, this is done in irq map function since we know
* which irqs are strictly per cpu
*/
irq_set_percpu_devid(irq);
irq_modify_status(irq, IRQ_NOAUTOEN, 0); /* @irq, @clr, @set */
#endif
rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
if (rc)

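A minimal sketch of the request/enable convention that comment describes, using only the generic genirq per-cpu helpers; the names (my_isr, my_percpu_irq_setup) are illustrative and not part of this patch:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/smp.h>

static irqreturn_t my_isr(int irq, void *dev)
{
	return IRQ_HANDLED;
}

/* Sketch only: the boot CPU performs the one-time request (the IRQ
 * stays disabled because percpu_devid implies NOAUTOEN), then every
 * CPU, the boot CPU included, enables its private copy locally.
 */
static void my_percpu_irq_setup(int irq, void __percpu *pcpu_dev)
{
	if (!smp_processor_id()) {
		if (request_percpu_irq(irq, my_isr, "my-block", pcpu_dev))
			return;
	}

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}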

@ -132,7 +132,7 @@ static void mcip_probe_n_setup(void)
struct plat_smp_ops plat_smp_ops = {
.info = smp_cpuinfo_buf,
.init_early_smp = mcip_probe_n_setup,
.init_irq_cpu = mcip_setup_per_cpu,
.init_per_cpu = mcip_setup_per_cpu,
.ipi_send = mcip_ipi_send,
.ipi_clear = mcip_ipi_clear,
};


@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
#endif /* CONFIG_ISA_ARCV2 */
void arc_cpu_pmu_irq_init(void)
static void arc_cpu_pmu_irq_init(void *data)
{
struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
int irq = *(int *)data;
arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr,
"ARC perf counters", pmu_cpu);
enable_percpu_irq(irq, IRQ_TYPE_NONE);
/* Clear all pending interrupt flags */
write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
if (has_interrupts) {
int irq = platform_get_irq(pdev, 0);
unsigned long flags;
if (irq < 0) {
pr_err("Cannot get IRQ number for the platform\n");
@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
arc_pmu->irq = irq;
/*
* arc_cpu_pmu_irq_init() needs to be called on all cores for
* their respective local PMU.
* However we use opencoded on_each_cpu() to ensure it is called
* on core0 first, so that arc_request_percpu_irq() sets up
* AUTOEN etc. Otherwise enable_percpu_irq() fails to enable
* perf IRQ on non master cores.
* see arc_request_percpu_irq()
*/
preempt_disable();
local_irq_save(flags);
arc_cpu_pmu_irq_init();
local_irq_restore(flags);
smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
preempt_enable();
/* intc map function ensures irq_set_percpu_devid() called */
request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
this_cpu_ptr(&arc_pmu_cpu));
on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
/* Clean all pending interrupt flags */
write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
} else
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;


@ -429,7 +429,6 @@ void __init setup_arch(char **cmdline_p)
#endif
arc_unwind_init();
arc_unwind_setup();
}
static int __init customize_machine(void)


@ -132,11 +132,11 @@ void start_kernel_secondary(void)
pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
/* Some SMP H/w setup - for each cpu */
if (plat_smp_ops.init_irq_cpu)
plat_smp_ops.init_irq_cpu(cpu);
if (plat_smp_ops.init_per_cpu)
plat_smp_ops.init_per_cpu(cpu);
if (machine_desc->init_cpu_smp)
machine_desc->init_cpu_smp(cpu);
if (machine_desc->init_per_cpu)
machine_desc->init_per_cpu(cpu);
arc_local_timer_setup();


@ -170,6 +170,23 @@ static struct unwind_table *find_table(unsigned long pc)
static unsigned long read_pointer(const u8 **pLoc,
const void *end, signed ptrType);
static void init_unwind_hdr(struct unwind_table *table,
void *(*alloc) (unsigned long));
/*
* wrappers for header alloc (vs. calling one vs. other at call site)
* to elide section mismatch warnings
*/
static void *__init unw_hdr_alloc_early(unsigned long sz)
{
return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
MAX_DMA_ADDRESS);
}
static void *unw_hdr_alloc(unsigned long sz)
{
return kmalloc(sz, GFP_KERNEL);
}
static void init_unwind_table(struct unwind_table *table, const char *name,
const void *core_start, unsigned long core_size,
@ -209,6 +226,8 @@ void __init arc_unwind_init(void)
__start_unwind, __end_unwind - __start_unwind,
NULL, 0);
/*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
init_unwind_hdr(&root_table, unw_hdr_alloc_early);
}
static const u32 bad_cie, not_fde;
@ -241,7 +260,7 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
e2->fde = v;
}
static void __init setup_unwind_table(struct unwind_table *table,
static void init_unwind_hdr(struct unwind_table *table,
void *(*alloc) (unsigned long))
{
const u8 *ptr;
@ -274,13 +293,13 @@ static void __init setup_unwind_table(struct unwind_table *table,
const u32 *cie = cie_for_fde(fde, table);
signed ptrType;
if (cie == &not_fde)
if (cie == &not_fde) /* only process FDE here */
continue;
if (cie == NULL || cie == &bad_cie)
return;
continue; /* say FDE->CIE.version != 1 */
ptrType = fde_pointer_type(cie);
if (ptrType < 0)
return;
continue;
ptr = (const u8 *)(fde + 2);
if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
@ -300,9 +319,11 @@ static void __init setup_unwind_table(struct unwind_table *table,
hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
+ 2 * n * sizeof(unsigned long);
header = alloc(hdrSize);
if (!header)
return;
header->version = 1;
header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
@ -322,6 +343,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
if (fde[1] == 0xffffffff)
continue; /* this is a CIE */
if (*(u8 *)(cie + 2) != 1)
continue; /* FDE->CIE.version not supported */
ptr = (const u8 *)(fde + 2);
header->table[n].start = read_pointer(&ptr,
(const u8 *)(fde + 1) +
@ -342,18 +367,6 @@ static void __init setup_unwind_table(struct unwind_table *table,
table->header = (const void *)header;
}
static void *__init balloc(unsigned long sz)
{
return __alloc_bootmem_nopanic(sz,
sizeof(unsigned int),
__pa(MAX_DMA_ADDRESS));
}
void __init arc_unwind_setup(void)
{
setup_unwind_table(&root_table, balloc);
}
#ifdef CONFIG_MODULES
static struct unwind_table *last_table;
@ -377,6 +390,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
table_start, table_size,
NULL, 0);
init_unwind_hdr(table, unw_hdr_alloc);
#ifdef UNWIND_DEBUG
unw_debug("Table added for [%s] %lx %lx\n",
module->name, table->core.pc, table->core.range);
@ -439,6 +454,7 @@ void unwind_remove_table(void *handle, int init_only)
info.init_only = init_only;
unlink_table(&info); /* XXX: SMP */
kfree(table->header);
kfree(table);
}
@ -507,7 +523,8 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
|| (*cie & (sizeof(*cie) - 1))
|| (cie[1] != 0xffffffff))
|| (cie[1] != 0xffffffff)
|| ( *(u8 *)(cie + 2) != 1)) /* version 1 supported */
return NULL; /* this is not a (valid) CIE */
return cie;
}


@ -51,7 +51,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
int in_use = 0;
if (!low_mem_sz) {
BUG_ON(base != low_mem_start);
if (base != low_mem_start)
panic("CONFIG_LINUX_LINK_BASE != DT memory { }");
low_mem_sz = size;
in_use = 1;
} else {


@ -74,7 +74,7 @@
reg = <0x48240200 0x100>;
interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&dpll_mpu_m2_ck>;
clocks = <&mpu_periphclk>;
};
local_timer: timer@48240600 {
@ -82,7 +82,7 @@
reg = <0x48240600 0x100>;
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&dpll_mpu_m2_ck>;
clocks = <&mpu_periphclk>;
};
l2-cache-controller@48242000 {


@ -259,6 +259,14 @@
ti,invert-autoidle-bit;
};
mpu_periphclk: mpu_periphclk {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clocks = <&dpll_mpu_m2_ck>;
clock-mult = <1>;
clock-div = <2>;
};
dpll_ddr_ck: dpll_ddr_ck {
#clock-cells = <0>;
compatible = "ti,am3-dpll-clock";


@ -184,6 +184,7 @@
regulator-name = "VDD_SDHC_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
};
};
};


@ -118,7 +118,8 @@
sdhci0: sdhci@ab0000 {
compatible = "mrvl,pxav3-mmc";
reg = <0xab0000 0x200>;
clocks = <&chip_clk CLKID_SDIO1XIN>;
clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
clock-names = "io", "core";
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@ -126,7 +127,8 @@
sdhci1: sdhci@ab0800 {
compatible = "mrvl,pxav3-mmc";
reg = <0xab0800 0x200>;
clocks = <&chip_clk CLKID_SDIO1XIN>;
clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
clock-names = "io", "core";
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@ -135,7 +137,7 @@
compatible = "mrvl,pxav3-mmc";
reg = <0xab1000 0x200>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_NFC>;
clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_SDIO>;
clock-names = "io", "core";
status = "disabled";
};


@ -218,6 +218,7 @@
reg = <0x480c8000 0x2000>;
interrupts = <77>;
ti,hwmods = "mailbox";
#mbox-cells = <1>;
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <12>;
mbox_dsp: mbox_dsp {
@ -279,8 +280,11 @@
ti,spi-num-cs = <4>;
ti,hwmods = "mcspi1";
dmas = <&edma 16 &edma 17
&edma 18 &edma 19>;
dma-names = "tx0", "rx0", "tx1", "rx1";
&edma 18 &edma 19
&edma 20 &edma 21
&edma 22 &edma 23>;
dma-names = "tx0", "rx0", "tx1", "rx1",
"tx2", "rx2", "tx3", "rx3";
};
mmc1: mmc@48060000 {


@ -18,8 +18,3 @@
reg = <0x80000000 0x10000000>;
};
};
&L2 {
arm,data-latency = <2 1 2>;
arm,tag-latency = <3 2 3>;
};


@ -19,7 +19,7 @@
reg = <0x40006000 0x1000>;
cache-unified;
cache-level = <2>;
arm,data-latency = <1 1 1>;
arm,data-latency = <3 3 3>;
arm,tag-latency = <2 2 2>;
};
};


@ -178,8 +178,10 @@
compatible = "fsl,vf610-sai";
reg = <0x40031000 0x1000>;
interrupts = <86 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_SAI2>;
clock-names = "sai";
clocks = <&clks VF610_CLK_SAI2>,
<&clks VF610_CLK_SAI2_DIV>,
<&clks 0>, <&clks 0>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
dma-names = "tx", "rx";
dmas = <&edma0 0 21>,
<&edma0 0 20>;


@ -21,6 +21,7 @@
#ifndef __ASSEMBLY__
#include <linux/io.h>
#include <asm/barrier.h>
#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2
#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm


@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
unsigned int __ua_flags = uaccess_save_and_enable();
n = arm_copy_to_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
#else
return arm_copy_to_user(to, from, n);
#endif
}
extern unsigned long __must_check


@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs)
{
unsigned long flags;
char buf[64];
#ifndef CONFIG_CPU_V7M
unsigned int domain;
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Get the domain register for the parent context. In user
* mode, we don't save the DACR, so lets use what it should
* be. For other modes, we place it after the pt_regs struct.
*/
if (user_mode(regs))
domain = DACR_UACCESS_ENABLE;
else
domain = *(unsigned int *)(regs + 1);
#else
domain = get_domain();
#endif
#endif
show_regs_print_info(KERN_DEFAULT);
@ -123,21 +139,8 @@ void __show_regs(struct pt_regs *regs)
#ifndef CONFIG_CPU_V7M
{
unsigned int domain = get_domain();
const char *segment;
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Get the domain register for the parent context. In user
* mode, we don't save the DACR, so lets use what it should
* be. For other modes, we place it after the pt_regs struct.
*/
if (user_mode(regs))
domain = DACR_UACCESS_ENABLE;
else
domain = *(unsigned int *)(regs + 1);
#endif
if ((domain & domain_mask(DOMAIN_USER)) ==
domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
segment = "none";
@ -163,11 +166,11 @@ void __show_regs(struct pt_regs *regs)
buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
{
unsigned int transbase, dac = get_domain();
unsigned int transbase;
asm("mrc p15, 0, %0, c2, c0\n\t"
: "=r" (transbase));
snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
transbase, dac);
transbase, domain);
}
#endif
asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));


@ -36,10 +36,10 @@
*/
#define __user_swpX_asm(data, addr, res, temp, B) \
__asm__ __volatile__( \
" mov %2, %1\n" \
"0: ldrex"B" %1, [%3]\n" \
"1: strex"B" %0, %2, [%3]\n" \
"0: ldrex"B" %2, [%3]\n" \
"1: strex"B" %0, %1, [%3]\n" \
" cmp %0, #0\n" \
" moveq %1, %2\n" \
" movne %0, %4\n" \
"2:\n" \
" .section .text.fixup,\"ax\"\n" \


@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
unsigned long ua_flags;
int atomic;
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
if (tocopy > n)
tocopy = n;
ua_flags = uaccess_save_and_enable();
memcpy((void *)to, from, tocopy);
uaccess_restore(ua_flags);
to += tocopy;
from += tocopy;
n -= tocopy;
@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
* With frame pointer disabled, tail call optimization kicks in
* as well making this test almost invisible.
*/
if (n < 64)
return __copy_to_user_std(to, from, n);
return __copy_to_user_memcpy(to, from, n);
if (n < 64) {
unsigned long ua_flags = uaccess_save_and_enable();
n = __copy_to_user_std(to, from, n);
uaccess_restore(ua_flags);
} else {
n = __copy_to_user_memcpy(to, from, n);
}
return n;
}
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
unsigned long ua_flags;
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
memset((void *)addr, 0, n);
return 0;
@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n)
if (tocopy > n)
tocopy = n;
ua_flags = uaccess_save_and_enable();
memset((void *)addr, 0, tocopy);
uaccess_restore(ua_flags);
addr += tocopy;
n -= tocopy;
@ -193,9 +205,14 @@ out:
unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
/* See rationale for this in __copy_to_user() above. */
if (n < 64)
return __clear_user_std(addr, n);
return __clear_user_memset(addr, n);
if (n < 64) {
unsigned long ua_flags = uaccess_save_and_enable();
n = __clear_user_std(addr, n);
uaccess_restore(ua_flags);
} else {
n = __clear_user_memset(addr, n);
}
return n;
}
#if 0


@ -4,7 +4,6 @@ menuconfig ARCH_AT91
select ARCH_REQUIRE_GPIOLIB
select COMMON_CLK_AT91
select PINCTRL
select PINCTRL_AT91
select SOC_BUS
if ARCH_AT91
@ -17,6 +16,7 @@ config SOC_SAMA5D2
select HAVE_AT91_USB_CLK
select HAVE_AT91_H32MX
select HAVE_AT91_GENERATED_CLK
select PINCTRL_AT91PIO4
help
Select this if you are using one of Atmel's SAMA5D2 family SoC.
@ -27,6 +27,7 @@ config SOC_SAMA5D3
select HAVE_AT91_UTMI
select HAVE_AT91_SMD
select HAVE_AT91_USB_CLK
select PINCTRL_AT91
help
Select this if you are using one of Atmel's SAMA5D3 family SoC.
This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36.
@ -40,6 +41,7 @@ config SOC_SAMA5D4
select HAVE_AT91_SMD
select HAVE_AT91_USB_CLK
select HAVE_AT91_H32MX
select PINCTRL_AT91
help
Select this if you are using one of Atmel's SAMA5D4 family SoC.
@ -50,6 +52,7 @@ config SOC_AT91RM9200
select CPU_ARM920T
select HAVE_AT91_USB_CLK
select MIGHT_HAVE_PCI
select PINCTRL_AT91
select SOC_SAM_V4_V5
select SRAM if PM
help
@ -65,6 +68,7 @@ config SOC_AT91SAM9
select HAVE_AT91_UTMI
select HAVE_FB_ATMEL
select MEMORY
select PINCTRL_AT91
select SOC_SAM_V4_V5
select SRAM if PM
help


@ -41,8 +41,10 @@
* implementation should be moved down into the pinctrl driver and get
* called as part of the generic suspend/resume path.
*/
#ifdef CONFIG_PINCTRL_AT91
extern void at91_pinctrl_gpio_suspend(void);
extern void at91_pinctrl_gpio_resume(void);
#endif
static struct {
unsigned long uhp_udp_mask;
@ -151,8 +153,9 @@ static void at91_pm_suspend(suspend_state_t state)
static int at91_pm_enter(suspend_state_t state)
{
#ifdef CONFIG_PINCTRL_AT91
at91_pinctrl_gpio_suspend();
#endif
switch (state) {
/*
* Suspend-to-RAM is like STANDBY plus slow clock mode, so
@ -192,7 +195,9 @@ static int at91_pm_enter(suspend_state_t state)
error:
target_state = PM_SUSPEND_ON;
#ifdef CONFIG_PINCTRL_AT91
at91_pinctrl_gpio_resume();
#endif
return 0;
}


@ -748,8 +748,12 @@ static void exynos5_powerdown_conf(enum sys_powerdown mode)
void exynos_sys_powerdown_conf(enum sys_powerdown mode)
{
unsigned int i;
const struct exynos_pmu_data *pmu_data;
const struct exynos_pmu_data *pmu_data = pmu_context->pmu_data;
if (!pmu_context)
return;
pmu_data = pmu_context->pmu_data;
if (pmu_data->powerdown_conf)
pmu_data->powerdown_conf(mode);


@ -143,7 +143,7 @@ static inline void __indirect_writesl(volatile void __iomem *bus_addr,
writel(*vaddr++, bus_addr);
}
static inline unsigned char __indirect_readb(const volatile void __iomem *p)
static inline u8 __indirect_readb(const volatile void __iomem *p)
{
u32 addr = (u32)p;
u32 n, byte_enables, data;
@ -166,7 +166,7 @@ static inline void __indirect_readsb(const volatile void __iomem *bus_addr,
*vaddr++ = readb(bus_addr);
}
static inline unsigned short __indirect_readw(const volatile void __iomem *p)
static inline u16 __indirect_readw(const volatile void __iomem *p)
{
u32 addr = (u32)p;
u32 n, byte_enables, data;
@ -189,7 +189,7 @@ static inline void __indirect_readsw(const volatile void __iomem *bus_addr,
*vaddr++ = readw(bus_addr);
}
static inline unsigned long __indirect_readl(const volatile void __iomem *p)
static inline u32 __indirect_readl(const volatile void __iomem *p)
{
u32 addr = (__force u32)p;
u32 data;
@ -350,7 +350,7 @@ static inline void insl(u32 io_addr, void *p, u32 count)
((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
#define ioread8(p) ioread8(p)
static inline unsigned int ioread8(const void __iomem *addr)
static inline u8 ioread8(const void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
@ -378,7 +378,7 @@ static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
}
#define ioread16(p) ioread16(p)
static inline unsigned int ioread16(const void __iomem *addr)
static inline u16 ioread16(const void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
@ -407,7 +407,7 @@ static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
}
#define ioread32(p) ioread32(p)
static inline unsigned int ioread32(const void __iomem *addr)
static inline u32 ioread32(const void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))


@ -121,6 +121,7 @@ config ARCH_OMAP2PLUS_TYPICAL
select NEON if CPU_V7
select PM
select REGULATOR
select REGULATOR_FIXED_VOLTAGE
select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
select VFP
@ -201,7 +202,6 @@ config MACH_OMAP3_PANDORA
depends on ARCH_OMAP3
default y
select OMAP_PACKAGE_CBB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_NOKIA_N810
bool


@ -889,6 +889,7 @@ static void __init e680_init(void)
pxa_set_keypad_info(&e680_keypad_platform_data);
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(e680_devices));
}
@ -956,6 +957,7 @@ static void __init a1200_init(void)
pxa_set_keypad_info(&a1200_keypad_platform_data);
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(a1200_devices));
}
@ -1148,6 +1150,7 @@ static void __init a910_init(void)
platform_device_register(&a910_camera);
}
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(a910_devices));
}
@ -1215,6 +1218,7 @@ static void __init e6_init(void)
pxa_set_keypad_info(&e6_keypad_platform_data);
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(e6_devices));
}
@ -1256,6 +1260,7 @@ static void __init e2_init(void)
pxa_set_keypad_info(&e2_keypad_platform_data);
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(e2_devices));
}


@ -20,7 +20,7 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>
static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = {
static struct cpufreq_frequency_table s3c2440_plls_12[] = {
{ .frequency = 75000000, .driver_data = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */
{ .frequency = 80000000, .driver_data = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */
{ .frequency = 90000000, .driver_data = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */


@ -20,7 +20,7 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>
static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = {
static struct cpufreq_frequency_table s3c2440_plls_169344[] = {
{ .frequency = 78019200, .driver_data = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */
{ .frequency = 84067200, .driver_data = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */
{ .frequency = 90115200, .driver_data = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */


@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
__flush_icache_all();
}
static int is_reserved_asid(u64 asid)
static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
int cpu;
for_each_possible_cpu(cpu)
if (per_cpu(reserved_asids, cpu) == asid)
return 1;
return 0;
bool hit = false;
/*
* Iterate over the set of reserved ASIDs looking for a match.
* If we find one, then we can update our mm to use newasid
* (i.e. the same ASID in the current generation) but we can't
* exit the loop early, since we need to ensure that all copies
* of the old ASID are updated to reflect the mm. Failure to do
* so could result in us missing the reserved ASID in a future
* generation.
*/
for_each_possible_cpu(cpu) {
if (per_cpu(reserved_asids, cpu) == asid) {
hit = true;
per_cpu(reserved_asids, cpu) = newasid;
}
}
return hit;
}
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
u64 generation = atomic64_read(&asid_generation);
if (asid != 0) {
u64 newasid = generation | (asid & ~ASID_MASK);
/*
* If our current ASID was active during a rollover, we
* can continue to use it and this was just a false alarm.
*/
if (is_reserved_asid(asid))
return generation | (asid & ~ASID_MASK);
if (check_update_reserved_asid(asid, newasid))
return newasid;
/*
* We had a valid ASID in a previous life, so try to re-use
@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
*/
asid &= ~ASID_MASK;
if (!__test_and_set_bit(asid, asid_map))
goto bump_gen;
return newasid;
}
/*
@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
__set_bit(asid, asid_map);
cur_idx = asid;
bump_gen:
asid |= generation;
cpumask_clear(mm_cpumask(mm));
return asid;
return asid | generation;
}
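To make the generation/index split concrete, a worked sketch with an illustrative 8-bit index (the real ASID_BITS and ASID_MASK differ per configuration):

#include <linux/bitops.h>

#define MY_ASID_BITS	8
#define MY_ASID_MASK	(~GENMASK_ULL(MY_ASID_BITS - 1, 0))

static u64 example_rollover(void)
{
	u64 old_asid   = (1ULL << MY_ASID_BITS) | 0x2a; /* gen 1, index 0x2a */
	u64 generation = 2ULL << MY_ASID_BITS;          /* after one rollover */

	/* keep the per-mm index, adopt the new generation: gen 2 | 0x2a */
	return generation | (old_asid & ~MY_ASID_MASK);
}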
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)


@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
return -ENOMEM;
for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
phys_addr_t phys = sg_phys(s) & PAGE_MASK;
phys_addr_t phys = page_to_phys(sg_page(s));
unsigned int len = PAGE_ALIGN(s->offset + s->length);
if (!is_coherent &&


@ -22,6 +22,7 @@
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <asm/cp15.h>
#include <asm/mach-types.h>
@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
* safe to be called with preemption disabled, as under stop_machine().
*/
static inline void section_update(unsigned long addr, pmdval_t mask,
pmdval_t prot)
pmdval_t prot, struct mm_struct *mm)
{
struct mm_struct *mm;
pmd_t *pmd;
mm = current->active_mm;
pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
#ifdef CONFIG_ARM_LPAE
@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
return !!(get_cr() & CR_XP);
}
#define set_section_perms(perms, field) { \
size_t i; \
unsigned long addr; \
\
if (!arch_has_strict_perms()) \
return; \
\
for (i = 0; i < ARRAY_SIZE(perms); i++) { \
if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \
!IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \
pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
perms[i].start, perms[i].end, \
SECTION_SIZE); \
continue; \
} \
\
for (addr = perms[i].start; \
addr < perms[i].end; \
addr += SECTION_SIZE) \
section_update(addr, perms[i].mask, \
perms[i].field); \
} \
void set_section_perms(struct section_perm *perms, int n, bool set,
struct mm_struct *mm)
{
size_t i;
unsigned long addr;
if (!arch_has_strict_perms())
return;
for (i = 0; i < n; i++) {
if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
!IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
pr_err("BUG: section %lx-%lx not aligned to %lx\n",
perms[i].start, perms[i].end,
SECTION_SIZE);
continue;
}
static inline void fix_kernmem_perms(void)
for (addr = perms[i].start;
addr < perms[i].end;
addr += SECTION_SIZE)
section_update(addr, perms[i].mask,
set ? perms[i].prot : perms[i].clear, mm);
}
}
static void update_sections_early(struct section_perm perms[], int n)
{
set_section_perms(nx_perms, prot);
struct task_struct *t, *s;
read_lock(&tasklist_lock);
for_each_process(t) {
if (t->flags & PF_KTHREAD)
continue;
for_each_thread(t, s)
set_section_perms(perms, n, true, s->mm);
}
read_unlock(&tasklist_lock);
set_section_perms(perms, n, true, current->active_mm);
set_section_perms(perms, n, true, &init_mm);
}
int __fix_kernmem_perms(void *unused)
{
update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
return 0;
}
void fix_kernmem_perms(void)
{
stop_machine(__fix_kernmem_perms, NULL, NULL);
}
#ifdef CONFIG_DEBUG_RODATA
int __mark_rodata_ro(void *unused)
{
update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
return 0;
}
void mark_rodata_ro(void)
{
set_section_perms(ro_perms, prot);
stop_machine(__mark_rodata_ro, NULL, NULL);
}
void set_kernel_text_rw(void)
{
set_section_perms(ro_perms, clear);
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
current->active_mm);
}
void set_kernel_text_ro(void)
{
set_section_perms(ro_perms, prot);
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
current->active_mm);
}
#endif /* CONFIG_DEBUG_RODATA */


@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
.equ cpu_v7_suspend_size, 4 * 9
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7_do_suspend)
stmfd sp!, {r4 - r10, lr}
stmfd sp!, {r4 - r11, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID
stmia r0!, {r4 - r5}
@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
stmia r0, {r5 - r11}
ldmfd sp!, {r4 - r10, pc}
ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_v7_do_suspend)
ENTRY(cpu_v7_do_resume)


@ -269,6 +269,7 @@
clock-frequency = <0>; /* Updated by bootloader */
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
little-endian;
bus-width = <4>;
};
@ -277,6 +278,7 @@
reg = <0x0 0x2300000 0x0 0x10000>;
interrupts = <0 36 0x4>; /* Level high type */
gpio-controller;
little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@ -287,6 +289,7 @@
reg = <0x0 0x2310000 0x0 0x10000>;
interrupts = <0 36 0x4>; /* Level high type */
gpio-controller;
little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@ -297,6 +300,7 @@
reg = <0x0 0x2320000 0x0 0x10000>;
interrupts = <0 37 0x4>; /* Level high type */
gpio-controller;
little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@ -307,6 +311,7 @@
reg = <0x0 0x2330000 0x0 0x10000>;
interrupts = <0 37 0x4>; /* Level high type */
gpio-controller;
little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;


@ -77,6 +77,7 @@
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
#include <asm/barrier.h>
/*
* Low-level accessors


@ -276,10 +276,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
* hardware updates of the pte (ptep_set_access_flags safely changes
* valid ptes without going through an invalid entry).
*/
if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
pte_valid(*ptep)) {
BUG_ON(!pte_young(pte));
BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
pte_valid(*ptep) && pte_valid(pte)) {
VM_WARN_ONCE(!pte_young(pte),
"%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
__func__, pte_val(*ptep), pte_val(pte));
VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
"%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
__func__, pte_val(*ptep), pte_val(pte));
}
set_pte(ptep, pte);


@ -5,6 +5,7 @@
*/
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
@ -140,7 +141,7 @@ SECTIONS
ARM_EXIT_KEEP(EXIT_DATA)
}
PERCPU_SECTION(64)
PERCPU_SECTION(L1_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
__init_end = .;
@ -158,7 +159,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_data = .;
_sdata = .;
RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
PECOFF_EDATA_PADDING
_edata = .;


@ -14,7 +14,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
*
* ppc:


@ -11,7 +11,7 @@
#define NR_syscalls 322 /* length of syscall table */
#define NR_syscalls 323 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about


@ -335,5 +335,6 @@
#define __NR_userfaultfd 1343
#define __NR_membarrier 1344
#define __NR_kcmp 1345
#define __NR_mlock2 1346
#endif /* _UAPI_ASM_IA64_UNISTD_H */


@ -1771,5 +1771,6 @@ sys_call_table:
data8 sys_userfaultfd
data8 sys_membarrier
data8 sys_kcmp // 1345
data8 sys_mlock2
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls


@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
/* FIXME this part of code is untested */
for_each_sg(sgl, sg, nents, i) {
sg->dma_address = sg_phys(sg);
__dma_sync(sg_phys(sg), sg->length, direction);
__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
sg->length, direction);
}
return nents;


@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
gfp = massage_gfp_flags(dev, gfp);
if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
page = dma_alloc_from_contiguous(dev,
count, get_order(size));
if (!page)


@ -372,7 +372,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
*/
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE))
#define pte_mkhuge(pte) (__pte(pte_val(pte) | \
(parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte) (0)
#define pte_mkhuge(pte) (pte)


@ -360,8 +360,9 @@
#define __NR_execveat (__NR_Linux + 342)
#define __NR_membarrier (__NR_Linux + 343)
#define __NR_userfaultfd (__NR_Linux + 344)
#define __NR_mlock2 (__NR_Linux + 345)
#define __NR_Linux_syscalls (__NR_userfaultfd + 1)
#define __NR_Linux_syscalls (__NR_mlock2 + 1)
#define __IGNORE_select /* newselect */


@ -171,24 +171,6 @@ void pcibios_set_master(struct pci_dev *dev)
}
void __init pcibios_init_bus(struct pci_bus *bus)
{
struct pci_dev *dev = bus->self;
unsigned short bridge_ctl;
/* We deal only with pci controllers and pci-pci bridges. */
if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
return;
/* PCI-PCI bridge - set the cache line and default latency
(32) for primary and secondary buses. */
pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
}
/*
* pcibios align resources() is called every time generic PCI code
* wants to generate a new address. The process of looking for


@ -440,6 +440,7 @@
ENTRY_COMP(execveat)
ENTRY_SAME(membarrier)
ENTRY_SAME(userfaultfd)
ENTRY_SAME(mlock2) /* 345 */
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))


@ -227,23 +227,15 @@
reg = <0x520 0x20>;
phy0: ethernet-phy@1f {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <0x1f>;
};
phy1: ethernet-phy@0 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <0>;
};
phy2: ethernet-phy@1 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <1>;
};
phy3: ethernet-phy@2 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <2>;
};
tbi0: tbi-phy@11 {


@ -370,16 +370,16 @@ COMPAT_SYS(execveat)
PPC64ONLY(switch_endian)
SYSCALL_SPU(userfaultfd)
SYSCALL_SPU(membarrier)
SYSCALL(semop)
SYSCALL(semget)
COMPAT_SYS(semctl)
COMPAT_SYS(semtimedop)
COMPAT_SYS(msgsnd)
COMPAT_SYS(msgrcv)
SYSCALL(msgget)
COMPAT_SYS(msgctl)
COMPAT_SYS(shmat)
SYSCALL(shmdt)
SYSCALL(shmget)
COMPAT_SYS(shmctl)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(mlock2)


@ -388,18 +388,6 @@
#define __NR_switch_endian 363
#define __NR_userfaultfd 364
#define __NR_membarrier 365
#define __NR_semop 366
#define __NR_semget 367
#define __NR_semctl 368
#define __NR_semtimedop 369
#define __NR_msgsnd 370
#define __NR_msgrcv 371
#define __NR_msgget 372
#define __NR_msgctl 373
#define __NR_shmat 374
#define __NR_shmdt 375
#define __NR_shmget 376
#define __NR_shmctl 377
#define __NR_mlock2 378
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */


@ -590,16 +590,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
eeh_ops->configure_bridge(pe);
eeh_pe_restore_bars(pe);
/*
* If it's PHB PE, the frozen state on all available PEs should have
* been cleared by the PHB reset. Otherwise, we unfreeze the PE and its
* child PEs because they might be in frozen state.
*/
if (!(pe->type & EEH_PE_PHB)) {
/* Clear frozen state */
rc = eeh_clear_pe_frozen_state(pe, false);
if (rc)
return rc;
}
/* Give the system 5 seconds to finish running the user-space
* hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,


@ -43,11 +43,34 @@ static unsigned int opal_irq_count;
static unsigned int *opal_irqs;
static void opal_handle_irq_work(struct irq_work *work);
static __be64 last_outstanding_events;
static u64 last_outstanding_events;
static struct irq_work opal_event_irq_work = {
.func = opal_handle_irq_work,
};
void opal_handle_events(uint64_t events)
{
int virq, hwirq = 0;
u64 mask = opal_event_irqchip.mask;
if (!in_irq() && (events & mask)) {
last_outstanding_events = events;
irq_work_queue(&opal_event_irq_work);
return;
}
while (events & mask) {
hwirq = fls64(events) - 1;
if (BIT_ULL(hwirq) & mask) {
virq = irq_find_mapping(opal_event_irqchip.domain,
hwirq);
if (virq)
generic_handle_irq(virq);
}
events &= ~BIT_ULL(hwirq);
}
}
static void opal_event_mask(struct irq_data *d)
{
clear_bit(d->hwirq, &opal_event_irqchip.mask);
@ -55,9 +78,21 @@ static void opal_event_mask(struct irq_data *d)
static void opal_event_unmask(struct irq_data *d)
{
__be64 events;
set_bit(d->hwirq, &opal_event_irqchip.mask);
opal_poll_events(&last_outstanding_events);
opal_poll_events(&events);
last_outstanding_events = be64_to_cpu(events);
/*
* We can't just handle the events now with opal_handle_events().
* If we did we would deadlock when opal_event_unmask() is called from
* handle_level_irq() with the irq descriptor lock held, because
* calling opal_handle_events() would call generic_handle_irq() and
* then handle_level_irq() which would try to take the descriptor lock
* again. Instead queue the events for later.
*/
if (last_outstanding_events & opal_event_irqchip.mask)
/* Need to retrigger the interrupt */
irq_work_queue(&opal_event_irq_work);
@ -96,29 +131,6 @@ static int opal_event_map(struct irq_domain *d, unsigned int irq,
return 0;
}
void opal_handle_events(uint64_t events)
{
int virq, hwirq = 0;
u64 mask = opal_event_irqchip.mask;
if (!in_irq() && (events & mask)) {
last_outstanding_events = events;
irq_work_queue(&opal_event_irq_work);
return;
}
while (events & mask) {
hwirq = fls64(events) - 1;
if (BIT_ULL(hwirq) & mask) {
virq = irq_find_mapping(opal_event_irqchip.domain,
hwirq);
if (virq)
generic_handle_irq(virq);
}
events &= ~BIT_ULL(hwirq);
}
}
static irqreturn_t opal_interrupt(int irq, void *data)
{
__be64 events;
@ -131,7 +143,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
static void opal_handle_irq_work(struct irq_work *work)
{
opal_handle_events(be64_to_cpu(last_outstanding_events));
opal_handle_events(last_outstanding_events);
}
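The deferral that opal_event_unmask() relies on is the stock irq_work pattern; in miniature (names are illustrative, not from this patch):

#include <linux/irq_work.h>

static u64 pending_events;	/* stand-in for last_outstanding_events */

static void drain_pending(struct irq_work *work)
{
	/* Runs later via the irq_work path, outside the irq
	 * descriptor lock, so handling events here cannot deadlock
	 * the way an inline call from the unmask hook would.
	 */
}

static struct irq_work deferred_drain = { .func = drain_pending };

static void unmask_like_path(u64 events)
{
	pending_events = events;
	irq_work_queue(&deferred_drain);	/* never drain inline here */
}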
static int opal_event_match(struct irq_domain *h, struct device_node *node,


@ -278,7 +278,7 @@ static void opal_handle_message(void)
/* Sanity check */
if (type >= OPAL_MSG_TYPE_MAX) {
pr_warning("%s: Unknown message type: %u\n", __func__, type);
pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
return;
}
opal_message_do_notify(type, (void *)&msg);


@ -278,7 +278,7 @@
#define __NR_fsetxattr 256
#define __NR_getxattr 257
#define __NR_lgetxattr 258
#define __NR_fgetxattr 269
#define __NR_fgetxattr 259
#define __NR_listxattr 260
#define __NR_llistxattr 261
#define __NR_flistxattr 262


@ -10,7 +10,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
*
* ppc:


@ -9,7 +9,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/perf_event.h>


@ -21,7 +21,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*/


@ -131,7 +131,7 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
# The wrappers will select whether using "malloc" or the kernel allocator.
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt)) -lrt
LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
# Used by link-vmlinux.sh which has special support for um link
export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)


@ -249,21 +249,23 @@ void close_addr(unsigned char *addr, unsigned char *netmask, void *arg)
char *split_if_spec(char *str, ...)
{
char **arg, *end;
char **arg, *end, *ret = NULL;
va_list ap;
va_start(ap, str);
while ((arg = va_arg(ap, char **)) != NULL) {
if (*str == '\0')
return NULL;
goto out;
end = strchr(str, ',');
if (end != str)
*arg = str;
if (end == NULL)
return NULL;
goto out;
*end++ = '\0';
str = end;
}
ret = str;
out:
va_end(ap);
return str;
return ret;
}
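With that change every exit path now runs va_end(). A hypothetical call showing the contract (field values invented for illustration):

static void example(void)
{
	char spec[] = "tuntap,eth0,10.0.0.1";
	char *transport, *dev, *rest;

	/* Consumes one comma-separated field per char ** argument;
	 * returns the remainder, or NULL if a field was missing.
	 */
	rest = split_if_spec(spec, &transport, &dev, NULL);
	/* transport == "tuntap", dev == "eth0", rest == "10.0.0.1" */
}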


@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
struct ksignal ksig;
int handled_sig = 0;
while (get_signal(&ksig)) {
if (get_signal(&ksig)) {
handled_sig = 1;
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);


@ -5,7 +5,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*


@ -5,7 +5,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*
@ -387,7 +387,7 @@ struct cpu_hw_events {
/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
__EVENT_CONSTRAINT(code, n, \
INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
@ -627,6 +627,7 @@ struct x86_perf_task_context {
u64 lbr_from[MAX_LBR_ENTRIES];
u64 lbr_to[MAX_LBR_ENTRIES];
u64 lbr_info[MAX_LBR_ENTRIES];
int tos;
int lbr_callstack_users;
int lbr_stack_state;
};


@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */


@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
{
if (event->attach_state & PERF_ATTACH_TASK)
return perf_cgroup_from_task(event->hw.target);
return perf_cgroup_from_task(event->hw.target, event->ctx);
return event->cgrp;
}


@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
}
mask = x86_pmu.lbr_nr - 1;
tos = intel_pmu_lbr_tos();
tos = task_ctx->tos;
for (i = 0; i < tos; i++) {
lbr_idx = (tos - i) & mask;
wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
wrmsrl(x86_pmu.lbr_tos, tos);
task_ctx->lbr_stack_state = LBR_NONE;
}
@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
task_ctx->tos = tos;
task_ctx->lbr_stack_state = LBR_VALID;
}


@ -1,7 +1,7 @@
/*
* x86 specific code for irq_work
*
* Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/kernel.h>


@ -89,7 +89,7 @@ static struct addr_marker address_markers[] = {
{ 0/* VMALLOC_START */, "vmalloc() Area" },
{ 0/*VMALLOC_END*/, "vmalloc() End" },
# ifdef CONFIG_HIGHMEM
{ 0/*PKMAP_BASE*/, "Persisent kmap() Area" },
{ 0/*PKMAP_BASE*/, "Persistent kmap() Area" },
# endif
{ 0/*FIXADDR_START*/, "Fixmap Area" },
#endif


@ -211,7 +211,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
if (err)
return 1;
err = convert_fxsr_from_user(&fpx, sc.fpstate);
err = convert_fxsr_from_user(&fpx, (void *)sc.fpstate);
if (err)
return 1;
@ -227,7 +227,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
{
struct user_i387_struct fp;
err = copy_from_user(&fp, sc.fpstate,
err = copy_from_user(&fp, (void *)sc.fpstate,
sizeof(struct user_i387_struct));
if (err)
return 1;
@ -291,7 +291,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
#endif
#undef PUTREG
sc.oldmask = mask;
sc.fpstate = to_fp;
sc.fpstate = (unsigned long)to_fp;
err = copy_to_user(to, &sc, sizeof(struct sigcontext));
if (err)
@ -468,12 +468,10 @@ long sys_sigreturn(void)
struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
sigset_t set;
struct sigcontext __user *sc = &frame->sc;
unsigned long __user *oldmask = &sc->oldmask;
unsigned long __user *extramask = frame->extramask;
int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
copy_from_user(&set.sig[1], extramask, sig_size))
if (copy_from_user(&set.sig[0], (void *)sc->oldmask, sizeof(set.sig[0])) ||
copy_from_user(&set.sig[1], frame->extramask, sig_size))
goto segfault;
set_current_blocked(&set);
@ -505,6 +503,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
{
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
unsigned long fp_to;
frame = (struct rt_sigframe __user *)
round_down(stack_top - sizeof(struct rt_sigframe), 16);
@@ -526,7 +525,10 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
set->sig[0]);
err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
fp_to = (unsigned long)&frame->fpstate;
err |= __put_user(fp_to, &frame->uc.uc_mcontext.fpstate);
if (sizeof(*set) == 16) {
err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);


@@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void)
{
x86_init.paging.pagetable_init = xen_pagetable_init;
/* Optimization - we can use the HVM one but it has no idea which
* VCPUs are descheduled - which means that it will needlessly IPI
* them. Xen knows so let it do the job.
*/
if (xen_feature(XENFEAT_auto_translated_physmap)) {
pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
}
pv_mmu_ops = xen_mmu_ops;
memset(dummy_mapping, 0xff, PAGE_SIZE);


@@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled)
void xen_arch_pre_suspend(void)
{
int cpu;
for_each_online_cpu(cpu)
xen_pmu_finish(cpu);
if (xen_pv_domain())
xen_pv_pre_suspend();
}
void xen_arch_post_suspend(int cancelled)
{
int cpu;
if (xen_pv_domain())
xen_pv_post_suspend(cancelled);
else
xen_hvm_post_suspend(cancelled);
for_each_online_cpu(cpu)
xen_pmu_init(cpu);
}
static void xen_vcpu_notify_restore(void *data)
@@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data)
void xen_arch_resume(void)
{
int cpu;
on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
for_each_online_cpu(cpu)
xen_pmu_init(cpu);
}
void xen_arch_suspend(void)
{
int cpu;
for_each_online_cpu(cpu)
xen_pmu_finish(cpu);
on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
}


@@ -1127,15 +1127,15 @@ void blkcg_exit_queue(struct request_queue *q)
* of the main cic data structures. For now we allow a task to change
* its cgroup only if it's the only owner of its ioc.
*/
static int blkcg_can_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *dst_css;
struct io_context *ioc;
int ret = 0;
/* task_lock() is needed to avoid races with exit_io_context() */
cgroup_taskset_for_each(task, tset) {
cgroup_taskset_for_each(task, dst_css, tset) {
task_lock(task);
ioc = task->io_context;
if (ioc && atomic_read(&ioc->nr_tasks) > 1)


@@ -3405,6 +3405,9 @@ int blk_pre_runtime_suspend(struct request_queue *q)
{
int ret = 0;
if (!q->dev)
return ret;
spin_lock_irq(q->queue_lock);
if (q->nr_pending) {
ret = -EBUSY;
@@ -3432,6 +3435,9 @@ EXPORT_SYMBOL(blk_pre_runtime_suspend);
*/
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
if (!q->dev)
return;
spin_lock_irq(q->queue_lock);
if (!err) {
q->rpm_status = RPM_SUSPENDED;
@@ -3456,6 +3462,9 @@ EXPORT_SYMBOL(blk_post_runtime_suspend);
*/
void blk_pre_runtime_resume(struct request_queue *q)
{
if (!q->dev)
return;
spin_lock_irq(q->queue_lock);
q->rpm_status = RPM_RESUMING;
spin_unlock_irq(q->queue_lock);
@@ -3478,6 +3487,9 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
*/
void blk_post_runtime_resume(struct request_queue *q, int err)
{
if (!q->dev)
return;
spin_lock_irq(q->queue_lock);
if (!err) {
q->rpm_status = RPM_ACTIVE;
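
Each of the four runtime-PM hooks in this series of hunks gains the same
guard: if the queue was never set up for runtime PM (so q->dev is still
NULL), the hook returns immediately instead of updating rpm_status for a
device that does not exist. The guard-clause shape, sketched with
illustrative types rather than the block layer's:

/*
 * Sketch of the guard added above: the hooks become no-ops for
 * queues that never enabled runtime PM. Illustrative types.
 */
#include <stddef.h>

struct queue_sketch {
        void *dev;               /* NULL when runtime PM was never set up */
        int rpm_status;
};

static void post_runtime_resume(struct queue_sketch *q, int err)
{
        if (!q->dev)             /* the early return each hunk adds */
                return;

        q->rpm_status = err ? 1 /* suspended */ : 0 /* active */;
}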


@@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
walk->iv = req->info;
walk->nbytes = walk->total;
if (unlikely(!walk->total))
return 0;
walk->iv_buffer = NULL;
walk->iv = req->info;
if (unlikely(((unsigned long)walk->iv & alignmask))) {
int err = ablkcipher_copy_iv(walk, tfm, alignmask);


@@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
walk->iv = desc->info;
walk->nbytes = walk->total;
if (unlikely(!walk->total))
return 0;
walk->buffer = NULL;
walk->iv = desc->info;
if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
int err = blkcipher_copy_iv(walk);
if (err)
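
Both walk_first() hunks apply the same one-line reordering: walk->iv is now
assigned before the zero-length early return, so a request with walk->total
== 0 still hands the caller a valid IV pointer instead of leaving the field
unset. A sketch of the ordering, with illustrative types rather than the
crypto API:

/*
 * Sketch: populate caller-visible state before any early-return
 * path, mirroring the reordering above. Illustrative types.
 */
#include <stddef.h>

struct walk {
        unsigned char *iv;
        size_t total;
};

static int walk_first(struct walk *w, unsigned char *req_iv)
{
        w->iv = req_iv;          /* assign first ... */

        if (w->total == 0)       /* ... so this path still sees a valid IV */
                return 0;

        /* walking of w->total bytes would follow here */
        return 0;
}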


@@ -1810,7 +1810,7 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
if (!dev->driver) {
/* dev->driver may be null if we're being removed */
dev_dbg(dev, "%s: no driver found for dev\n", __func__);
return;
goto out_unlock;
}
if (!acpi_desc) {


@@ -314,16 +314,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@@ -350,10 +340,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,


@@ -62,6 +62,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
}
#ifdef CONFIG_PM_SLEEP
static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
{
return ahci_platform_suspend_host(&pdev->dev);
@@ -81,6 +82,10 @@ static int ahci_mvebu_resume(struct platform_device *pdev)
return ahci_platform_resume_host(&pdev->dev);
}
#else
#define ahci_mvebu_suspend NULL
#define ahci_mvebu_resume NULL
#endif
static const struct ata_port_info ahci_mvebu_port_info = {
.flags = AHCI_FLAG_COMMON,


@@ -1273,6 +1273,15 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
ata_tf_to_fis(tf, pmp, is_cmd, fis);
ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
/* set port value for softreset of Port Multiplier */
if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
tmp = readl(port_mmio + PORT_FBS);
tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
tmp |= pmp << PORT_FBS_DEV_OFFSET;
writel(tmp, port_mmio + PORT_FBS);
pp->fbs_last_dev = pmp;
}
/* issue & wait */
writel(1, port_mmio + PORT_CMD_ISSUE);
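
The new block selects the target device in the PORT_FBS register before a
softreset behind a port multiplier, and caches the last value programmed in
pp->fbs_last_dev so an unchanged target costs no extra register access. The
read-modify-write-with-cache pattern, sketched with a stand-in register
layout rather than the AHCI one:

/*
 * Sketch: update a register field only when the value changed,
 * tracking the last value written. Stand-in layout, not AHCI's.
 */
#include <stdint.h>

#define DEV_MASK   0x00000f00u
#define DEV_SHIFT  8

struct port {
        volatile uint32_t *fbs_reg;      /* stand-in for PORT_FBS MMIO */
        int last_dev;                    /* last device number written */
};

static void select_pmp_dev(struct port *p, int pmp)
{
        uint32_t tmp;

        if (p->last_dev == pmp)          /* unchanged: skip the access */
                return;

        tmp = *p->fbs_reg;               /* read-modify-write the field */
        tmp &= ~DEV_MASK;
        tmp |= (uint32_t)pmp << DEV_SHIFT;
        *p->fbs_reg = tmp;
        p->last_dev = pmp;
}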


@@ -1505,12 +1505,20 @@ static const char *ata_err_string(unsigned int err_mask)
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors)
{
unsigned long ap_flags = dev->link->ap->flags;
struct ata_taskfile tf;
unsigned int err_mask;
bool dma = false;
DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
/*
* Return error without actually issuing the command on controllers
* which e.g. lockup on a read log page.
*/
if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
return AC_ERR_DEV;
retry:
ata_tf_init(dev, &tf);
if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&


@@ -45,7 +45,8 @@ enum {
SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */
SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN),
ATA_FLAG_PMP | ATA_FLAG_NCQ |
ATA_FLAG_AN | ATA_FLAG_NO_LOG_PAGE),
SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH,
SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */


@@ -630,6 +630,9 @@ static void sil_dev_config(struct ata_device *dev)
unsigned int n, quirks = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
/* This controller doesn't support trim */
dev->horkage |= ATA_HORKAGE_NOTRIM;
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
for (n = 0; sil_blacklist[n].product; n++)


@@ -303,6 +303,10 @@ static int memory_subsys_offline(struct device *dev)
if (mem->state == MEM_OFFLINE)
return 0;
/* Can't offline block with non-present sections */
if (mem->section_count != sections_per_block)
return -EINVAL;
return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}


@@ -390,6 +390,7 @@ static int pm_genpd_runtime_suspend(struct device *dev)
struct generic_pm_domain *genpd;
bool (*stop_ok)(struct device *__dev);
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
bool runtime_pm = pm_runtime_enabled(dev);
ktime_t time_start;
s64 elapsed_ns;
int ret;
@@ -400,11 +401,18 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
/*
* A runtime PM centric subsystem/driver may re-use the runtime PM
* callbacks for other purposes than runtime PM. In those scenarios
* runtime PM is disabled. Under these circumstances, we shall skip
* validating/measuring the PM QoS latency.
*/
stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
if (stop_ok && !stop_ok(dev))
if (runtime_pm && stop_ok && !stop_ok(dev))
return -EBUSY;
/* Measure suspend latency. */
if (runtime_pm)
time_start = ktime_get();
ret = genpd_save_dev(genpd, dev);
@@ -418,6 +426,7 @@ static int pm_genpd_runtime_suspend(struct device *dev)
}
/* Update suspend latency value if the measured time exceeds it. */
if (runtime_pm) {
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns > td->suspend_latency_ns) {
td->suspend_latency_ns = elapsed_ns;
@@ -426,6 +435,7 @@ static int pm_genpd_runtime_suspend(struct device *dev)
genpd->max_off_time_changed = true;
td->constraint_changed = true;
}
}
/*
* If power.irq_safe is set, this routine will be run with interrupts
@@ -453,6 +463,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
{
struct generic_pm_domain *genpd;
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
bool runtime_pm = pm_runtime_enabled(dev);
ktime_t time_start;
s64 elapsed_ns;
int ret;
@@ -479,14 +490,14 @@ static int pm_genpd_runtime_resume(struct device *dev)
out:
/* Measure resume latency. */
if (timed)
if (timed && runtime_pm)
time_start = ktime_get();
genpd_start_dev(genpd, dev);
genpd_restore_dev(genpd, dev);
/* Update resume latency value if the measured time exceeds it. */
if (timed) {
if (timed && runtime_pm) {
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns > td->resume_latency_ns) {
td->resume_latency_ns = elapsed_ns;
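
These pm_genpd_runtime_suspend()/resume() hunks gate the latency measurement
and its QoS bookkeeping on pm_runtime_enabled(dev): as the new comment
explains, a subsystem may reuse these callbacks with runtime PM disabled, in
which case measuring the latency would be meaningless. A userspace sketch of
conditional worst-case latency tracking, with clock_gettime() standing in
for ktime_get() and illustrative names throughout:

/*
 * Sketch: measure a callback's latency, and fold it into a
 * worst-case bound, only when measuring is meaningful.
 * Stand-ins for ktime_get()/pm_runtime_enabled().
 */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static int64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static int64_t worst_latency_ns;         /* like td->suspend_latency_ns */

static void run_measured(void (*cb)(void), bool runtime_pm)
{
        int64_t start = 0;

        if (runtime_pm)                  /* start the clock only if it matters */
                start = now_ns();

        cb();

        if (runtime_pm) {
                int64_t elapsed = now_ns() - start;

                if (elapsed > worst_latency_ns)
                        worst_latency_ns = elapsed;
        }
}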


@@ -444,8 +444,9 @@ static void null_lnvm_end_io(struct request *rq, int error)
blk_put_request(rq);
}
static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
struct request_queue *q = dev->q;
struct request *rq;
struct bio *bio = rqd->bio;
@@ -470,7 +471,7 @@ static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
return 0;
}
static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
sector_t size = gb * 1024 * 1024 * 1024ULL;
sector_t blksize;
@@ -523,7 +524,7 @@ static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
return 0;
}
static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
mempool_t *virtmem_pool;
@@ -541,7 +542,7 @@ static void null_lnvm_destroy_dma_pool(void *pool)
mempool_destroy(pool);
}
static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
gfp_t mem_flags, dma_addr_t *dma_handler)
{
return mempool_alloc(pool, mem_flags);
@@ -765,7 +766,9 @@ out:
static int __init null_init(void)
{
int ret = 0;
unsigned int i;
struct nullb *nullb;
if (bs > PAGE_SIZE) {
pr_warn("null_blk: invalid block size\n");
@@ -807,22 +810,29 @@ static int __init null_init(void)
0, 0, NULL);
if (!ppa_cache) {
pr_err("null_blk: unable to create ppa cache\n");
return -ENOMEM;
}
}
for (i = 0; i < nr_devices; i++) {
if (null_add_dev()) {
unregister_blkdev(null_major, "nullb");
ret = -ENOMEM;
goto err_ppa;
}
}
for (i = 0; i < nr_devices; i++) {
ret = null_add_dev();
if (ret)
goto err_dev;
}
pr_info("null: module loaded\n");
return 0;
err_ppa:
err_dev:
while (!list_empty(&nullb_list)) {
nullb = list_entry(nullb_list.next, struct nullb, list);
null_del_dev(nullb);
}
kmem_cache_destroy(ppa_cache);
return -EINVAL;
err_ppa:
unregister_blkdev(null_major, "nullb");
return ret;
}
static void __exit null_exit(void)
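
The reworked null_init() error handling follows the usual init-function
shape: perform the steps in order, give each failure point a label, and
unwind strictly in reverse, tearing down any devices added so far, then
destroying the cache, then unregistering the block device. The same
goto-unwind ordering in a self-contained sketch with hypothetical step
names:

/*
 * Sketch of the goto-unwind ordering used in null_init() above:
 * acquire in order, release in reverse. Hypothetical step names.
 */
#include <stdio.h>

static int step_register(void)  { return 0; }
static int step_cache(void)     { return 0; }
static int step_add_devs(void)  { return -1; }  /* simulate a failure */
static void undo_add_devs(void) { puts("tearing down devices"); }
static void undo_cache(void)    { puts("destroying cache"); }
static void undo_register(void) { puts("unregistering"); }

static int init_sketch(void)
{
        int ret;

        ret = step_register();
        if (ret)
                return ret;

        ret = step_cache();
        if (ret)
                goto err_register;

        ret = step_add_devs();
        if (ret)
                goto err_dev;

        return 0;

err_dev:                 /* partial devices first, then the cache */
        undo_add_devs();
        undo_cache();
err_register:
        undo_register();
        return ret;
}

int main(void)
{
        return init_sketch() ? 1 : 0;
}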


@@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
goto unmap;
for (n = 0, i = 0; n < nseg; n++) {
uint8_t first_sect, last_sect;
if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
/* Map indirect segments */
if (segments)
@@ -957,15 +959,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
}
i = n % SEGS_PER_INDIRECT_FRAME;
pending_req->segments[n]->gref = segments[i].gref;
seg[n].nsec = segments[i].last_sect -
segments[i].first_sect + 1;
seg[n].offset = (segments[i].first_sect << 9);
if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
(segments[i].last_sect < segments[i].first_sect)) {
first_sect = READ_ONCE(segments[i].first_sect);
last_sect = READ_ONCE(segments[i].last_sect);
if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
rc = -EINVAL;
goto unmap;
}
seg[n].nsec = last_sect - first_sect + 1;
seg[n].offset = first_sect << 9;
preq->nr_sects += seg[n].nsec;
}


@@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
dst->operation = src->operation;
switch (src->operation) {
dst->operation = READ_ONCE(src->operation);
switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
@@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
struct blkif_x86_64_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
dst->operation = src->operation;
switch (src->operation) {
dst->operation = READ_ONCE(src->operation);
switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
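
Like the xen_blkbk_parse_indirect() hunk further up, both
blkif_get_x86_*_req() changes close a double-fetch race of the kind covered
by Xen advisory XSA-155: a field that lives in guest-shared memory is copied
out exactly once with READ_ONCE(), and every later decision uses the private
copy, so the guest cannot change the value between validation and use. A
userspace illustration, with a simplified stand-in for the kernel's
READ_ONCE():

/*
 * Sketch: snapshot a value from attacker-writable shared memory
 * once, validate the snapshot, use only the snapshot. The macro is
 * a simplified stand-in for the kernel's READ_ONCE().
 */
#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct shared_seg {                      /* guest-writable memory */
        uint8_t first_sect;
        uint8_t last_sect;
};

static int parse_seg(struct shared_seg *seg, unsigned int *nsec)
{
        uint8_t first = READ_ONCE(seg->first_sect);
        uint8_t last  = READ_ONCE(seg->last_sect);

        if (last >= 8 || last < first)   /* validate the local copies */
                return -1;

        *nsec = last - first + 1u;       /* derived from the snapshot only */
        return 0;
}

int main(void)
{
        struct shared_seg s = { .first_sect = 2, .last_sect = 5 };
        unsigned int nsec;

        if (parse_seg(&s, &nsec) == 0)
                printf("nsec = %u\n", nsec);     /* prints: nsec = 4 */
        return 0;
}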


@@ -1230,14 +1230,14 @@ static int smi_start_processing(void *send_info,
new_smi->intf = intf;
/* Try to claim any interrupts. */
if (new_smi->irq_setup)
new_smi->irq_setup(new_smi);
/* Set up the timer that drives the interface. */
setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
/* Try to claim any interrupts. */
if (new_smi->irq_setup)
new_smi->irq_setup(new_smi);
/*
* Check if the user forcefully enabled the daemon.
*/
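
The ipmi_si hunk swaps the order of the two setup steps: the timer that
drives the interface is initialized before any interrupt is claimed, because
once irq_setup() runs the handler may fire immediately and touch that timer.
A short sketch of the rule (finish initializing everything an asynchronous
context may use before publishing it), with hypothetical names:

/*
 * Sketch of the ordering fix above: initialize what the handler
 * needs before interrupts can be delivered. Hypothetical names.
 */
#include <stdbool.h>

struct smi_sketch {
        bool timer_ready;
        void (*irq_setup)(struct smi_sketch *);
};

static void start_processing(struct smi_sketch *s)
{
        /* 1. Timer first: the interrupt handler may rely on it. */
        s->timer_ready = true;

        /* 2. Only now allow interrupts to be delivered. */
        if (s->irq_setup)
                s->irq_setup(s);
}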
