Merge branch 'devel-stable' into for-next

Conflicts:
	arch/arm/include/asm/atomic.h
	arch/arm/include/asm/hardirq.h
	arch/arm/kernel/smp.c

Commit df762eccba
@ -52,6 +52,8 @@ config ARM
	select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
	select HAVE_PERF_EVENTS
	select HAVE_PERF_REGS
	select HAVE_PERF_USER_STACK_DUMP
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_SYSCALL_TRACEPOINTS
	select HAVE_UID16

@ -482,6 +484,7 @@ config ARCH_IXP4XX
	bool "IXP4xx-based"
	depends on MMU
	select ARCH_HAS_DMA_SET_COHERENT_MASK
	select ARCH_SUPPORTS_BIG_ENDIAN
	select ARCH_REQUIRE_GPIOLIB
	select CLKSRC_MMIO
	select CPU_XSCALE

@ -1545,6 +1548,32 @@ config MCPM
	  for (multi-)cluster based systems, such as big.LITTLE based
	  systems.

config BIG_LITTLE
	bool "big.LITTLE support (Experimental)"
	depends on CPU_V7 && SMP
	select MCPM
	help
	  This option enables support selections for the big.LITTLE
	  system architecture.

config BL_SWITCHER
	bool "big.LITTLE switcher support"
	depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
	select CPU_PM
	select ARM_CPU_SUSPEND
	help
	  The big.LITTLE "switcher" provides the core functionality to
	  transparently handle transition between a cluster of A15's
	  and a cluster of A7's in a big.LITTLE system.

config BL_SWITCHER_DUMMY_IF
	tristate "Simple big.LITTLE switcher user interface"
	depends on BL_SWITCHER && DEBUG_KERNEL
	help
	  This is a simple and dummy char dev interface to control
	  the big.LITTLE switcher core code. It is meant for
	  debugging purposes only.

choice
	prompt "Memory split"
	default VMSPLIT_3G
@ -16,6 +16,7 @@ LDFLAGS		:=
LDFLAGS_vmlinux	:=-p --no-undefined -X
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux	+= --be8
LDFLAGS_MODULE	+= --be8
endif

OBJCOPYFLAGS	:=-O binary -R .comment -S
@ -135,6 +135,7 @@ start:
		.word	_edata			@ zImage end address
 THUMB(		.thumb			)
1:
 ARM_BE8(	setend	be )			@ go BE8 if compiled for BE8
		mrs	r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly

@ -699,9 +700,7 @@ __armv4_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs

@ -728,9 +727,7 @@ __armv7_mmu_cache_on:
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
@ -17,3 +17,5 @@ obj-$(CONFIG_MCPM)	+= mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
AFLAGS_mcpm_head.o	:= -march=armv7-a
AFLAGS_vlock.o		:= -march=armv7-a
obj-$(CONFIG_TI_PRIV_EDMA) += edma.o
obj-$(CONFIG_BL_SWITCHER)	+= bL_switcher.o
obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o
@ -0,0 +1,822 @@
|
|||
/*
|
||||
* arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
|
||||
*
|
||||
* Created by: Nicolas Pitre, March 2012
|
||||
* Copyright: (C) 2012-2013 Linaro Limited
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/cpu_pm.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/tick.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/irqchip/arm-gic.h>
|
||||
#include <linux/moduleparam.h>
|
||||
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/suspend.h>
|
||||
#include <asm/mcpm.h>
|
||||
#include <asm/bL_switcher.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/power_cpu_migrate.h>
|
||||
|
||||
|
||||
/*
|
||||
* Use our own MPIDR accessors as the generic ones in asm/cputype.h have
|
||||
* __attribute_const__ and we don't want the compiler to assume any
|
||||
* constness here as the value _does_ change along some code paths.
|
||||
*/
|
||||
|
||||
static int read_mpidr(void)
|
||||
{
|
||||
unsigned int id;
|
||||
asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
|
||||
return id & MPIDR_HWID_BITMASK;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get a global nanosecond time stamp for tracing.
|
||||
*/
|
||||
static s64 get_ns(void)
|
||||
{
|
||||
struct timespec ts;
|
||||
getnstimeofday(&ts);
|
||||
return timespec_to_ns(&ts);
|
||||
}
|
||||
|
||||
/*
|
||||
* bL switcher core code.
|
||||
*/
|
||||
|
||||
static void bL_do_switch(void *_arg)
|
||||
{
|
||||
unsigned ib_mpidr, ib_cpu, ib_cluster;
|
||||
long volatile handshake, **handshake_ptr = _arg;
|
||||
|
||||
pr_debug("%s\n", __func__);
|
||||
|
||||
ib_mpidr = cpu_logical_map(smp_processor_id());
|
||||
ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
|
||||
ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
|
||||
|
||||
/* Advertise our handshake location */
|
||||
if (handshake_ptr) {
|
||||
handshake = 0;
|
||||
*handshake_ptr = &handshake;
|
||||
} else
|
||||
handshake = -1;
|
||||
|
||||
/*
|
||||
* Our state has been saved at this point. Let's release our
|
||||
* inbound CPU.
|
||||
*/
|
||||
mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
|
||||
sev();
|
||||
|
||||
/*
|
||||
* From this point, we must assume that our counterpart CPU might
|
||||
* have taken over in its parallel world already, as if execution
|
||||
* just returned from cpu_suspend(). It is therefore important to
|
||||
* be very careful not to make any change the other guy is not
|
||||
* expecting. This is why we need stack isolation.
|
||||
*
|
||||
* Fancy under cover tasks could be performed here. For now
|
||||
* we have none.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Let's wait until our inbound is alive.
|
||||
*/
|
||||
while (!handshake) {
|
||||
wfe();
|
||||
smp_mb();
|
||||
}
|
||||
|
||||
/* Let's put ourself down. */
|
||||
mcpm_cpu_power_down();
|
||||
|
||||
/* should never get here */
|
||||
BUG();
|
||||
}
|
||||
|
||||
/*
|
||||
* Stack isolation. To ensure 'current' remains valid, we just use another
|
||||
* piece of our thread's stack space which should be fairly lightly used.
|
||||
* The selected area starts just above the thread_info structure located
|
||||
* at the very bottom of the stack, aligned to a cache line, and indexed
|
||||
* with the cluster number.
|
||||
*/
|
||||
#define STACK_SIZE 512
|
||||
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
|
||||
static int bL_switchpoint(unsigned long _arg)
|
||||
{
|
||||
unsigned int mpidr = read_mpidr();
|
||||
unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
|
||||
void *stack = current_thread_info() + 1;
|
||||
stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
|
||||
stack += clusterid * STACK_SIZE + STACK_SIZE;
|
||||
call_with_stack(bL_do_switch, (void *)_arg, stack);
|
||||
BUG();
|
||||
}
|
||||
|
||||
/*
|
||||
* Generic switcher interface
|
||||
*/
|
||||
|
||||
static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
|
||||
static int bL_switcher_cpu_pairing[NR_CPUS];
|
||||
|
||||
/*
|
||||
* bL_switch_to - Switch to a specific cluster for the current CPU
|
||||
* @new_cluster_id: the ID of the cluster to switch to.
|
||||
*
|
||||
* This function must be called on the CPU to be switched.
|
||||
* Returns 0 on success, else a negative status code.
|
||||
*/
|
||||
static int bL_switch_to(unsigned int new_cluster_id)
|
||||
{
|
||||
unsigned int mpidr, this_cpu, that_cpu;
|
||||
unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
|
||||
struct completion inbound_alive;
|
||||
struct tick_device *tdev;
|
||||
enum clock_event_mode tdev_mode;
|
||||
long volatile *handshake_ptr;
|
||||
int ipi_nr, ret;
|
||||
|
||||
this_cpu = smp_processor_id();
|
||||
ob_mpidr = read_mpidr();
|
||||
ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
|
||||
ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
|
||||
BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);
|
||||
|
||||
if (new_cluster_id == ob_cluster)
|
||||
return 0;
|
||||
|
||||
that_cpu = bL_switcher_cpu_pairing[this_cpu];
|
||||
ib_mpidr = cpu_logical_map(that_cpu);
|
||||
ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
|
||||
ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
|
||||
|
||||
pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
|
||||
this_cpu, ob_mpidr, ib_mpidr);
|
||||
|
||||
this_cpu = smp_processor_id();
|
||||
|
||||
/* Close the gate for our entry vectors */
|
||||
mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
|
||||
mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);
|
||||
|
||||
/* Install our "inbound alive" notifier. */
|
||||
init_completion(&inbound_alive);
|
||||
ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
|
||||
ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
|
||||
mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);
|
||||
|
||||
/*
|
||||
* Let's wake up the inbound CPU now in case it requires some delay
|
||||
* to come online, but leave it gated in our entry vector code.
|
||||
*/
|
||||
ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
|
||||
if (ret) {
|
||||
pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Raise a SGI on the inbound CPU to make sure it doesn't stall
|
||||
* in a possible WFI, such as in bL_power_down().
|
||||
*/
|
||||
gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);
|
||||
|
||||
/*
|
||||
* Wait for the inbound to come up. This allows for other
|
||||
* tasks to be scheduled in the mean time.
|
||||
*/
|
||||
wait_for_completion(&inbound_alive);
|
||||
mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);
|
||||
|
||||
/*
|
||||
* From this point we are entering the switch critical zone
|
||||
* and can't take any interrupts anymore.
|
||||
*/
|
||||
local_irq_disable();
|
||||
local_fiq_disable();
|
||||
trace_cpu_migrate_begin(get_ns(), ob_mpidr);
|
||||
|
||||
/* redirect GIC's SGIs to our counterpart */
|
||||
gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
|
||||
|
||||
tdev = tick_get_device(this_cpu);
|
||||
if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
|
||||
tdev = NULL;
|
||||
if (tdev) {
|
||||
tdev_mode = tdev->evtdev->mode;
|
||||
clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
|
||||
}
|
||||
|
||||
ret = cpu_pm_enter();
|
||||
|
||||
/* we can not tolerate errors at this point */
|
||||
if (ret)
|
||||
panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);
|
||||
|
||||
/* Swap the physical CPUs in the logical map for this logical CPU. */
|
||||
cpu_logical_map(this_cpu) = ib_mpidr;
|
||||
cpu_logical_map(that_cpu) = ob_mpidr;
|
||||
|
||||
/* Let's do the actual CPU switch. */
|
||||
ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
|
||||
if (ret > 0)
|
||||
panic("%s: cpu_suspend() returned %d\n", __func__, ret);
|
||||
|
||||
/* We are executing on the inbound CPU at this point */
|
||||
mpidr = read_mpidr();
|
||||
pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
|
||||
BUG_ON(mpidr != ib_mpidr);
|
||||
|
||||
mcpm_cpu_powered_up();
|
||||
|
||||
ret = cpu_pm_exit();
|
||||
|
||||
if (tdev) {
|
||||
clockevents_set_mode(tdev->evtdev, tdev_mode);
|
||||
clockevents_program_event(tdev->evtdev,
|
||||
tdev->evtdev->next_event, 1);
|
||||
}
|
||||
|
||||
trace_cpu_migrate_finish(get_ns(), ib_mpidr);
|
||||
local_fiq_enable();
|
||||
local_irq_enable();
|
||||
|
||||
*handshake_ptr = 1;
|
||||
dsb_sev();
|
||||
|
||||
if (ret)
|
||||
pr_err("%s exiting with error %d\n", __func__, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct bL_thread {
|
||||
spinlock_t lock;
|
||||
struct task_struct *task;
|
||||
wait_queue_head_t wq;
|
||||
int wanted_cluster;
|
||||
struct completion started;
|
||||
bL_switch_completion_handler completer;
|
||||
void *completer_cookie;
|
||||
};
|
||||
|
||||
static struct bL_thread bL_threads[NR_CPUS];
|
||||
|
||||
static int bL_switcher_thread(void *arg)
|
||||
{
|
||||
struct bL_thread *t = arg;
|
||||
struct sched_param param = { .sched_priority = 1 };
|
||||
int cluster;
|
||||
bL_switch_completion_handler completer;
|
||||
void *completer_cookie;
|
||||
|
||||
sched_setscheduler_nocheck(current, SCHED_FIFO, ¶m);
|
||||
complete(&t->started);
|
||||
|
||||
do {
|
||||
if (signal_pending(current))
|
||||
flush_signals(current);
|
||||
wait_event_interruptible(t->wq,
|
||||
t->wanted_cluster != -1 ||
|
||||
kthread_should_stop());
|
||||
|
||||
spin_lock(&t->lock);
|
||||
cluster = t->wanted_cluster;
|
||||
completer = t->completer;
|
||||
completer_cookie = t->completer_cookie;
|
||||
t->wanted_cluster = -1;
|
||||
t->completer = NULL;
|
||||
spin_unlock(&t->lock);
|
||||
|
||||
if (cluster != -1) {
|
||||
bL_switch_to(cluster);
|
||||
|
||||
if (completer)
|
||||
completer(completer_cookie);
|
||||
}
|
||||
} while (!kthread_should_stop());
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
|
||||
{
|
||||
struct task_struct *task;
|
||||
|
||||
task = kthread_create_on_node(bL_switcher_thread, arg,
|
||||
cpu_to_node(cpu), "kswitcher_%d", cpu);
|
||||
if (!IS_ERR(task)) {
|
||||
kthread_bind(task, cpu);
|
||||
wake_up_process(task);
|
||||
} else
|
||||
pr_err("%s failed for CPU %d\n", __func__, cpu);
|
||||
return task;
|
||||
}
|
||||
|
||||
/*
|
||||
* bL_switch_request_cb - Switch to a specific cluster for the given CPU,
|
||||
* with completion notification via a callback
|
||||
*
|
||||
* @cpu: the CPU to switch
|
||||
* @new_cluster_id: the ID of the cluster to switch to.
|
||||
* @completer: switch completion callback. if non-NULL,
|
||||
* @completer(@completer_cookie) will be called on completion of
|
||||
* the switch, in non-atomic context.
|
||||
* @completer_cookie: opaque context argument for @completer.
|
||||
*
|
||||
* This function causes a cluster switch on the given CPU by waking up
|
||||
* the appropriate switcher thread. This function may or may not return
|
||||
* before the switch has occurred.
|
||||
*
|
||||
* If a @completer callback function is supplied, it will be called when
|
||||
* the switch is complete. This can be used to determine asynchronously
|
||||
* when the switch is complete, regardless of when bL_switch_request()
|
||||
* returns. When @completer is supplied, no new switch request is permitted
|
||||
* for the affected CPU until after the switch is complete, and @completer
|
||||
* has returned.
|
||||
*/
|
||||
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
|
||||
bL_switch_completion_handler completer,
|
||||
void *completer_cookie)
|
||||
{
|
||||
struct bL_thread *t;
|
||||
|
||||
if (cpu >= ARRAY_SIZE(bL_threads)) {
|
||||
pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
t = &bL_threads[cpu];
|
||||
|
||||
if (IS_ERR(t->task))
|
||||
return PTR_ERR(t->task);
|
||||
if (!t->task)
|
||||
return -ESRCH;
|
||||
|
||||
spin_lock(&t->lock);
|
||||
if (t->completer) {
|
||||
spin_unlock(&t->lock);
|
||||
return -EBUSY;
|
||||
}
|
||||
t->completer = completer;
|
||||
t->completer_cookie = completer_cookie;
|
||||
t->wanted_cluster = new_cluster_id;
|
||||
spin_unlock(&t->lock);
|
||||
wake_up(&t->wq);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bL_switch_request_cb);
|
||||
|
||||
/*
|
||||
* Activation and configuration code.
|
||||
*/
|
||||
|
||||
static DEFINE_MUTEX(bL_switcher_activation_lock);
|
||||
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
|
||||
static unsigned int bL_switcher_active;
|
||||
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
|
||||
static cpumask_t bL_switcher_removed_logical_cpus;
|
||||
|
||||
int bL_switcher_register_notifier(struct notifier_block *nb)
|
||||
{
|
||||
return blocking_notifier_chain_register(&bL_activation_notifier, nb);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);
|
||||
|
||||
int bL_switcher_unregister_notifier(struct notifier_block *nb)
|
||||
{
|
||||
return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);
|
||||
|
||||
static int bL_activation_notify(unsigned long val)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
|
||||
if (ret & NOTIFY_STOP_MASK)
|
||||
pr_err("%s: notifier chain failed with status 0x%x\n",
|
||||
__func__, ret);
|
||||
return notifier_to_errno(ret);
|
||||
}
|
||||
|
||||
static void bL_switcher_restore_cpus(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for_each_cpu(i, &bL_switcher_removed_logical_cpus)
|
||||
cpu_up(i);
|
||||
}
|
||||
|
||||
static int bL_switcher_halve_cpus(void)
|
||||
{
|
||||
int i, j, cluster_0, gic_id, ret;
|
||||
unsigned int cpu, cluster, mask;
|
||||
cpumask_t available_cpus;
|
||||
|
||||
/* First pass to validate what we have */
|
||||
mask = 0;
|
||||
for_each_online_cpu(i) {
|
||||
cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
|
||||
cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
|
||||
if (cluster >= 2) {
|
||||
pr_err("%s: only dual cluster systems are supported\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
|
||||
return -EINVAL;
|
||||
mask |= (1 << cluster);
|
||||
}
|
||||
if (mask != 3) {
|
||||
pr_err("%s: no CPU pairing possible\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Now let's do the pairing. We match each CPU with another CPU
|
||||
* from a different cluster. To get a uniform scheduling behavior
|
||||
* without fiddling with CPU topology and compute capacity data,
|
||||
* we'll use logical CPUs initially belonging to the same cluster.
|
||||
*/
|
||||
memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
|
||||
cpumask_copy(&available_cpus, cpu_online_mask);
|
||||
cluster_0 = -1;
|
||||
for_each_cpu(i, &available_cpus) {
|
||||
int match = -1;
|
||||
cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
|
||||
if (cluster_0 == -1)
|
||||
cluster_0 = cluster;
|
||||
if (cluster != cluster_0)
|
||||
continue;
|
||||
cpumask_clear_cpu(i, &available_cpus);
|
||||
for_each_cpu(j, &available_cpus) {
|
||||
cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
|
||||
/*
|
||||
* Let's remember the last match to create "odd"
|
||||
* pairings on purpose in order for other code not
|
||||
* to assume any relation between physical and
|
||||
* logical CPU numbers.
|
||||
*/
|
||||
if (cluster != cluster_0)
|
||||
match = j;
|
||||
}
|
||||
if (match != -1) {
|
||||
bL_switcher_cpu_pairing[i] = match;
|
||||
cpumask_clear_cpu(match, &available_cpus);
|
||||
pr_info("CPU%d paired with CPU%d\n", i, match);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Now we disable the unwanted CPUs i.e. everything that has no
|
||||
* pairing information (that includes the pairing counterparts).
|
||||
*/
|
||||
cpumask_clear(&bL_switcher_removed_logical_cpus);
|
||||
for_each_online_cpu(i) {
|
||||
cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
|
||||
cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
|
||||
|
||||
/* Let's take note of the GIC ID for this CPU */
|
||||
gic_id = gic_get_cpu_id(i);
|
||||
if (gic_id < 0) {
|
||||
pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
|
||||
bL_switcher_restore_cpus();
|
||||
return -EINVAL;
|
||||
}
|
||||
bL_gic_id[cpu][cluster] = gic_id;
|
||||
pr_info("GIC ID for CPU %u cluster %u is %u\n",
|
||||
cpu, cluster, gic_id);
|
||||
|
||||
if (bL_switcher_cpu_pairing[i] != -1) {
|
||||
bL_switcher_cpu_original_cluster[i] = cluster;
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = cpu_down(i);
|
||||
if (ret) {
|
||||
bL_switcher_restore_cpus();
|
||||
return ret;
|
||||
}
|
||||
cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Determine the logical CPU a given physical CPU is grouped on. */
|
||||
int bL_switcher_get_logical_index(u32 mpidr)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
if (!bL_switcher_active)
|
||||
return -EUNATCH;
|
||||
|
||||
mpidr &= MPIDR_HWID_BITMASK;
|
||||
for_each_online_cpu(cpu) {
|
||||
int pairing = bL_switcher_cpu_pairing[cpu];
|
||||
if (pairing == -1)
|
||||
continue;
|
||||
if ((mpidr == cpu_logical_map(cpu)) ||
|
||||
(mpidr == cpu_logical_map(pairing)))
|
||||
return cpu;
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
|
||||
{
|
||||
trace_cpu_migrate_current(get_ns(), read_mpidr());
|
||||
}
|
||||
|
||||
int bL_switcher_trace_trigger(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
preempt_disable();
|
||||
|
||||
bL_switcher_trace_trigger_cpu(NULL);
|
||||
ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
|
||||
|
||||
preempt_enable();
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
|
||||
|
||||
static int bL_switcher_enable(void)
|
||||
{
|
||||
int cpu, ret;
|
||||
|
||||
mutex_lock(&bL_switcher_activation_lock);
|
||||
lock_device_hotplug();
|
||||
if (bL_switcher_active) {
|
||||
unlock_device_hotplug();
|
||||
mutex_unlock(&bL_switcher_activation_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
pr_info("big.LITTLE switcher initializing\n");
|
||||
|
||||
ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
ret = bL_switcher_halve_cpus();
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
bL_switcher_trace_trigger();
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
struct bL_thread *t = &bL_threads[cpu];
|
||||
spin_lock_init(&t->lock);
|
||||
init_waitqueue_head(&t->wq);
|
||||
init_completion(&t->started);
|
||||
t->wanted_cluster = -1;
|
||||
t->task = bL_switcher_thread_create(cpu, t);
|
||||
}
|
||||
|
||||
bL_switcher_active = 1;
|
||||
bL_activation_notify(BL_NOTIFY_POST_ENABLE);
|
||||
pr_info("big.LITTLE switcher initialized\n");
|
||||
goto out;
|
||||
|
||||
error:
|
||||
pr_warn("big.LITTLE switcher initialization failed\n");
|
||||
bL_activation_notify(BL_NOTIFY_POST_DISABLE);
|
||||
|
||||
out:
|
||||
unlock_device_hotplug();
|
||||
mutex_unlock(&bL_switcher_activation_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SYSFS
|
||||
|
||||
static void bL_switcher_disable(void)
|
||||
{
|
||||
unsigned int cpu, cluster;
|
||||
struct bL_thread *t;
|
||||
struct task_struct *task;
|
||||
|
||||
mutex_lock(&bL_switcher_activation_lock);
|
||||
lock_device_hotplug();
|
||||
|
||||
if (!bL_switcher_active)
|
||||
goto out;
|
||||
|
||||
if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
|
||||
bL_activation_notify(BL_NOTIFY_POST_ENABLE);
|
||||
goto out;
|
||||
}
|
||||
|
||||
bL_switcher_active = 0;
|
||||
|
||||
/*
|
||||
* To deactivate the switcher, we must shut down the switcher
|
||||
* threads to prevent any other requests from being accepted.
|
||||
* Then, if the final cluster for given logical CPU is not the
|
||||
* same as the original one, we'll recreate a switcher thread
|
||||
* just for the purpose of switching the CPU back without any
|
||||
* possibility for interference from external requests.
|
||||
*/
|
||||
for_each_online_cpu(cpu) {
|
||||
t = &bL_threads[cpu];
|
||||
task = t->task;
|
||||
t->task = NULL;
|
||||
if (!task || IS_ERR(task))
|
||||
continue;
|
||||
kthread_stop(task);
|
||||
/* no more switch may happen on this CPU at this point */
|
||||
cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
|
||||
if (cluster == bL_switcher_cpu_original_cluster[cpu])
|
||||
continue;
|
||||
init_completion(&t->started);
|
||||
t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
|
||||
task = bL_switcher_thread_create(cpu, t);
|
||||
if (!IS_ERR(task)) {
|
||||
wait_for_completion(&t->started);
|
||||
kthread_stop(task);
|
||||
cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
|
||||
if (cluster == bL_switcher_cpu_original_cluster[cpu])
|
||||
continue;
|
||||
}
|
||||
/* If execution gets here, we're in trouble. */
|
||||
pr_crit("%s: unable to restore original cluster for CPU %d\n",
|
||||
__func__, cpu);
|
||||
pr_crit("%s: CPU %d can't be restored\n",
|
||||
__func__, bL_switcher_cpu_pairing[cpu]);
|
||||
cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
|
||||
&bL_switcher_removed_logical_cpus);
|
||||
}
|
||||
|
||||
bL_switcher_restore_cpus();
|
||||
bL_switcher_trace_trigger();
|
||||
|
||||
bL_activation_notify(BL_NOTIFY_POST_DISABLE);
|
||||
|
||||
out:
|
||||
unlock_device_hotplug();
|
||||
mutex_unlock(&bL_switcher_activation_lock);
|
||||
}
|
||||
|
||||
static ssize_t bL_switcher_active_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%u\n", bL_switcher_active);
|
||||
}
|
||||
|
||||
static ssize_t bL_switcher_active_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, const char *buf, size_t count)
|
||||
{
|
||||
int ret;
|
||||
|
||||
switch (buf[0]) {
|
||||
case '0':
|
||||
bL_switcher_disable();
|
||||
ret = 0;
|
||||
break;
|
||||
case '1':
|
||||
ret = bL_switcher_enable();
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
return (ret >= 0) ? count : ret;
|
||||
}
|
||||
|
||||
static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, const char *buf, size_t count)
|
||||
{
|
||||
int ret = bL_switcher_trace_trigger();
|
||||
|
||||
return ret ? ret : count;
|
||||
}
|
||||
|
||||
static struct kobj_attribute bL_switcher_active_attr =
|
||||
__ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);
|
||||
|
||||
static struct kobj_attribute bL_switcher_trace_trigger_attr =
|
||||
__ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);
|
||||
|
||||
static struct attribute *bL_switcher_attrs[] = {
|
||||
&bL_switcher_active_attr.attr,
|
||||
&bL_switcher_trace_trigger_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group bL_switcher_attr_group = {
|
||||
.attrs = bL_switcher_attrs,
|
||||
};
|
||||
|
||||
static struct kobject *bL_switcher_kobj;
|
||||
|
||||
static int __init bL_switcher_sysfs_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
|
||||
if (!bL_switcher_kobj)
|
||||
return -ENOMEM;
|
||||
ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
|
||||
if (ret)
|
||||
kobject_put(bL_switcher_kobj);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SYSFS */
|
||||
|
||||
bool bL_switcher_get_enabled(void)
|
||||
{
|
||||
mutex_lock(&bL_switcher_activation_lock);
|
||||
|
||||
return bL_switcher_active;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);
|
||||
|
||||
void bL_switcher_put_enabled(void)
|
||||
{
|
||||
mutex_unlock(&bL_switcher_activation_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
|
||||
|
||||
/*
|
||||
* Veto any CPU hotplug operation on those CPUs we've removed
|
||||
* while the switcher is active.
|
||||
* We're just not ready to deal with that given the trickery involved.
|
||||
*/
|
||||
static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
|
||||
unsigned long action, void *hcpu)
|
||||
{
|
||||
if (bL_switcher_active) {
|
||||
int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
|
||||
switch (action & 0xf) {
|
||||
case CPU_UP_PREPARE:
|
||||
case CPU_DOWN_PREPARE:
|
||||
if (pairing == -1)
|
||||
return NOTIFY_BAD;
|
||||
}
|
||||
}
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static bool no_bL_switcher;
|
||||
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
|
||||
|
||||
static int __init bL_switcher_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (MAX_NR_CLUSTERS != 2) {
|
||||
pr_err("%s: only dual cluster systems are supported\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cpu_notifier(bL_switcher_hotplug_callback, 0);
|
||||
|
||||
if (!no_bL_switcher) {
|
||||
ret = bL_switcher_enable();
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SYSFS
|
||||
ret = bL_switcher_sysfs_init();
|
||||
if (ret)
|
||||
pr_err("%s: unable to create sysfs entry\n", __func__);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
late_initcall(bL_switcher_init);
|
|
@ -0,0 +1,71 @@
/*
 * arch/arm/common/bL_switcher_dummy_if.c -- b.L switcher dummy interface
 *
 * Created by:	Nicolas Pitre, November 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * Dummy interface to user space for debugging purpose only.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/uaccess.h>
#include <asm/bL_switcher.h>

static ssize_t bL_switcher_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	unsigned char val[3];
	unsigned int cpu, cluster;
	int ret;

	pr_debug("%s\n", __func__);

	if (len < 3)
		return -EINVAL;

	if (copy_from_user(val, buf, 3))
		return -EFAULT;

	/* format: <cpu#>,<cluster#> */
	if (val[0] < '0' || val[0] > '9' ||
	    val[1] != ',' ||
	    val[2] < '0' || val[2] > '1')
		return -EINVAL;

	cpu = val[0] - '0';
	cluster = val[2] - '0';
	ret = bL_switch_request(cpu, cluster);

	return ret ? : len;
}

static const struct file_operations bL_switcher_fops = {
	.write		= bL_switcher_write,
	.owner		= THIS_MODULE,
};

static struct miscdevice bL_switcher_device = {
	MISC_DYNAMIC_MINOR,
	"b.L_switcher",
	&bL_switcher_fops
};

static int __init bL_switcher_dummy_if_init(void)
{
	return misc_register(&bL_switcher_device);
}

static void __exit bL_switcher_dummy_if_exit(void)
{
	misc_deregister(&bL_switcher_device);
}

module_init(bL_switcher_dummy_if_init);
module_exit(bL_switcher_dummy_if_exit);
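The dummy interface above accepts a write of the form <cpu#>,<cluster#>. As an illustration only (not part of this commit), a minimal user-space client that asks logical CPU 0 to switch to cluster 1 might look like the sketch below; the /dev/b.L_switcher path assumes the usual udev node for the misc device registered above.

/* Hypothetical user-space test client for the dummy switcher interface.
 * Illustration only; assumes udev created /dev/b.L_switcher for the
 * misc device registered by bL_switcher_dummy_if_init().
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char req[] = "0,1";	/* ask CPU 0 to move to cluster 1 */
	int fd = open("/dev/b.L_switcher", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the driver requires at least 3 bytes in "<cpu#>,<cluster#>" form */
	if (write(fd, req, strlen(req)) < 0)
		perror("write");
	close(fd);
	return 0;
}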
@ -27,6 +27,18 @@ void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}

extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];

void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
			 unsigned long poke_phys_addr, unsigned long poke_val)
{
	unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
	poke[0] = poke_phys_addr;
	poke[1] = poke_val;
	__cpuc_flush_dcache_area((void *)poke, 8);
	outer_clean_range(__pa(poke), __pa(poke + 2));
}

static const struct mcpm_platform_ops *platform_ops;

int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
@ -15,6 +15,7 @@
|
|||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/mcpm.h>
|
||||
#include <asm/assembler.h>
|
||||
|
||||
#include "vlock.h"
|
||||
|
||||
|
@ -47,6 +48,7 @@
|
|||
|
||||
ENTRY(mcpm_entry_point)
|
||||
|
||||
ARM_BE8(setend be)
|
||||
THUMB( adr r12, BSYM(1f) )
|
||||
THUMB( bx r12 )
|
||||
THUMB( .thumb )
|
||||
|
@ -71,12 +73,19 @@ ENTRY(mcpm_entry_point)
|
|||
* position independent way.
|
||||
*/
|
||||
adr r5, 3f
|
||||
ldmia r5, {r6, r7, r8, r11}
|
||||
ldmia r5, {r0, r6, r7, r8, r11}
|
||||
add r0, r5, r0 @ r0 = mcpm_entry_early_pokes
|
||||
add r6, r5, r6 @ r6 = mcpm_entry_vectors
|
||||
ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys
|
||||
add r8, r5, r8 @ r8 = mcpm_sync
|
||||
add r11, r5, r11 @ r11 = first_man_locks
|
||||
|
||||
@ Perform an early poke, if any
|
||||
add r0, r0, r4, lsl #3
|
||||
ldmia r0, {r0, r1}
|
||||
teq r0, #0
|
||||
strne r1, [r0]
|
||||
|
||||
mov r0, #MCPM_SYNC_CLUSTER_SIZE
|
||||
mla r8, r0, r10, r8 @ r8 = sync cluster base
|
||||
|
||||
|
@ -195,7 +204,8 @@ mcpm_entry_gated:
|
|||
|
||||
.align 2
|
||||
|
||||
3: .word mcpm_entry_vectors - .
|
||||
3: .word mcpm_entry_early_pokes - .
|
||||
.word mcpm_entry_vectors - 3b
|
||||
.word mcpm_power_up_setup_phys - 3b
|
||||
.word mcpm_sync - 3b
|
||||
.word first_man_locks - 3b
|
||||
|
@ -214,6 +224,10 @@ first_man_locks:
|
|||
ENTRY(mcpm_entry_vectors)
|
||||
.space 4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
|
||||
|
||||
.type mcpm_entry_early_pokes, #object
|
||||
ENTRY(mcpm_entry_early_pokes)
|
||||
.space 8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
|
||||
|
||||
.type mcpm_power_up_setup_phys, #object
|
||||
ENTRY(mcpm_power_up_setup_phys)
|
||||
.space 4 @ set by mcpm_sync_init()
|
||||
|
|
|
@ -0,0 +1 @@
aesbs-core.S
@ -3,7 +3,17 @@
|
|||
#
|
||||
|
||||
obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
|
||||
obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
|
||||
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
|
||||
|
||||
aes-arm-y := aes-armv4.o aes_glue.o
|
||||
sha1-arm-y := sha1-armv4-large.o sha1_glue.o
|
||||
aes-arm-y := aes-armv4.o aes_glue.o
|
||||
aes-arm-bs-y := aesbs-core.o aesbs-glue.o
|
||||
sha1-arm-y := sha1-armv4-large.o sha1_glue.o
|
||||
|
||||
quiet_cmd_perl = PERL $@
|
||||
cmd_perl = $(PERL) $(<) > $(@)
|
||||
|
||||
$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
|
||||
$(call cmd,perl)
|
||||
|
||||
.PRECIOUS: $(obj)/aesbs-core.S
|
||||
|
|
|
@ -6,22 +6,12 @@
|
|||
#include <linux/crypto.h>
|
||||
#include <crypto/aes.h>
|
||||
|
||||
#define AES_MAXNR 14
|
||||
#include "aes_glue.h"
|
||||
|
||||
typedef struct {
|
||||
unsigned int rd_key[4 *(AES_MAXNR + 1)];
|
||||
int rounds;
|
||||
} AES_KEY;
|
||||
|
||||
struct AES_CTX {
|
||||
AES_KEY enc_key;
|
||||
AES_KEY dec_key;
|
||||
};
|
||||
|
||||
asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx);
|
||||
asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx);
|
||||
asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
|
||||
asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
|
||||
EXPORT_SYMBOL(AES_encrypt);
|
||||
EXPORT_SYMBOL(AES_decrypt);
|
||||
EXPORT_SYMBOL(private_AES_set_encrypt_key);
|
||||
EXPORT_SYMBOL(private_AES_set_decrypt_key);
|
||||
|
||||
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
{
|
||||
|
@ -81,7 +71,7 @@ static struct crypto_alg aes_alg = {
|
|||
.cipher = {
|
||||
.cia_min_keysize = AES_MIN_KEY_SIZE,
|
||||
.cia_max_keysize = AES_MAX_KEY_SIZE,
|
||||
.cia_setkey = aes_set_key,
|
||||
.cia_setkey = aes_set_key,
|
||||
.cia_encrypt = aes_encrypt,
|
||||
.cia_decrypt = aes_decrypt
|
||||
}
|
||||
|
|
|
@ -0,0 +1,19 @@
|
|||
|
||||
#define AES_MAXNR 14
|
||||
|
||||
struct AES_KEY {
|
||||
unsigned int rd_key[4 * (AES_MAXNR + 1)];
|
||||
int rounds;
|
||||
};
|
||||
|
||||
struct AES_CTX {
|
||||
struct AES_KEY enc_key;
|
||||
struct AES_KEY dec_key;
|
||||
};
|
||||
|
||||
asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
|
||||
asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
|
||||
asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey,
|
||||
const int bits, struct AES_KEY *key);
|
||||
asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey,
|
||||
const int bits, struct AES_KEY *key);
|
The diff for this file is not shown because it is too large.
|
@ -0,0 +1,434 @@
|
|||
/*
|
||||
* linux/arch/arm/crypto/aesbs-glue.c - glue code for NEON bit sliced AES
|
||||
*
|
||||
* Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <asm/neon.h>
|
||||
#include <crypto/aes.h>
|
||||
#include <crypto/ablk_helper.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include "aes_glue.h"
|
||||
|
||||
#define BIT_SLICED_KEY_MAXSIZE (128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE)
|
||||
|
||||
struct BS_KEY {
|
||||
struct AES_KEY rk;
|
||||
int converted;
|
||||
u8 __aligned(8) bs[BIT_SLICED_KEY_MAXSIZE];
|
||||
} __aligned(8);
|
||||
|
||||
asmlinkage void bsaes_enc_key_convert(u8 out[], struct AES_KEY const *in);
|
||||
asmlinkage void bsaes_dec_key_convert(u8 out[], struct AES_KEY const *in);
|
||||
|
||||
asmlinkage void bsaes_cbc_encrypt(u8 const in[], u8 out[], u32 bytes,
|
||||
struct BS_KEY *key, u8 iv[]);
|
||||
|
||||
asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
|
||||
struct BS_KEY *key, u8 const iv[]);
|
||||
|
||||
asmlinkage void bsaes_xts_encrypt(u8 const in[], u8 out[], u32 bytes,
|
||||
struct BS_KEY *key, u8 tweak[]);
|
||||
|
||||
asmlinkage void bsaes_xts_decrypt(u8 const in[], u8 out[], u32 bytes,
|
||||
struct BS_KEY *key, u8 tweak[]);
|
||||
|
||||
struct aesbs_cbc_ctx {
|
||||
struct AES_KEY enc;
|
||||
struct BS_KEY dec;
|
||||
};
|
||||
|
||||
struct aesbs_ctr_ctx {
|
||||
struct BS_KEY enc;
|
||||
};
|
||||
|
||||
struct aesbs_xts_ctx {
|
||||
struct BS_KEY enc;
|
||||
struct BS_KEY dec;
|
||||
struct AES_KEY twkey;
|
||||
};
|
||||
|
||||
static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
int bits = key_len * 8;
|
||||
|
||||
if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
|
||||
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
return -EINVAL;
|
||||
}
|
||||
ctx->dec.rk = ctx->enc;
|
||||
private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
|
||||
ctx->dec.converted = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
int bits = key_len * 8;
|
||||
|
||||
if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
|
||||
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
return -EINVAL;
|
||||
}
|
||||
ctx->enc.converted = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
int bits = key_len * 4;
|
||||
|
||||
if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
|
||||
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
return -EINVAL;
|
||||
}
|
||||
ctx->dec.rk = ctx->enc.rk;
|
||||
private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
|
||||
private_AES_set_encrypt_key(in_key + key_len / 2, bits, &ctx->twkey);
|
||||
ctx->enc.converted = ctx->dec.converted = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
|
||||
while (walk.nbytes) {
|
||||
u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
|
||||
u8 *src = walk.src.virt.addr;
|
||||
|
||||
if (walk.dst.virt.addr == walk.src.virt.addr) {
|
||||
u8 *iv = walk.iv;
|
||||
|
||||
do {
|
||||
crypto_xor(src, iv, AES_BLOCK_SIZE);
|
||||
AES_encrypt(src, src, &ctx->enc);
|
||||
iv = src;
|
||||
src += AES_BLOCK_SIZE;
|
||||
} while (--blocks);
|
||||
memcpy(walk.iv, iv, AES_BLOCK_SIZE);
|
||||
} else {
|
||||
u8 *dst = walk.dst.virt.addr;
|
||||
|
||||
do {
|
||||
crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
|
||||
AES_encrypt(walk.iv, dst, &ctx->enc);
|
||||
memcpy(walk.iv, dst, AES_BLOCK_SIZE);
|
||||
src += AES_BLOCK_SIZE;
|
||||
dst += AES_BLOCK_SIZE;
|
||||
} while (--blocks);
|
||||
}
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
|
||||
|
||||
while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
|
||||
kernel_neon_begin();
|
||||
bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
|
||||
walk.nbytes, &ctx->dec, walk.iv);
|
||||
kernel_neon_end();
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
while (walk.nbytes) {
|
||||
u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
|
||||
u8 *dst = walk.dst.virt.addr;
|
||||
u8 *src = walk.src.virt.addr;
|
||||
u8 bk[2][AES_BLOCK_SIZE];
|
||||
u8 *iv = walk.iv;
|
||||
|
||||
do {
|
||||
if (walk.dst.virt.addr == walk.src.virt.addr)
|
||||
memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
|
||||
|
||||
AES_decrypt(src, dst, &ctx->dec.rk);
|
||||
crypto_xor(dst, iv, AES_BLOCK_SIZE);
|
||||
|
||||
if (walk.dst.virt.addr == walk.src.virt.addr)
|
||||
iv = bk[blocks & 1];
|
||||
else
|
||||
iv = src;
|
||||
|
||||
dst += AES_BLOCK_SIZE;
|
||||
src += AES_BLOCK_SIZE;
|
||||
} while (--blocks);
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static void inc_be128_ctr(__be32 ctr[], u32 addend)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 3; i >= 0; i--, addend = 1) {
|
||||
u32 n = be32_to_cpu(ctr[i]) + addend;
|
||||
|
||||
ctr[i] = cpu_to_be32(n);
|
||||
if (n >= addend)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
u32 blocks;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
|
||||
|
||||
while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
|
||||
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
|
||||
__be32 *ctr = (__be32 *)walk.iv;
|
||||
u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);
|
||||
|
||||
/* avoid 32 bit counter overflow in the NEON code */
|
||||
if (unlikely(headroom < blocks)) {
|
||||
blocks = headroom + 1;
|
||||
tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
|
||||
}
|
||||
kernel_neon_begin();
|
||||
bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
|
||||
walk.dst.virt.addr, blocks,
|
||||
&ctx->enc, walk.iv);
|
||||
kernel_neon_end();
|
||||
inc_be128_ctr(ctr, blocks);
|
||||
|
||||
nbytes -= blocks * AES_BLOCK_SIZE;
|
||||
if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
|
||||
break;
|
||||
|
||||
err = blkcipher_walk_done(desc, &walk, tail);
|
||||
}
|
||||
if (walk.nbytes) {
|
||||
u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
|
||||
u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
|
||||
u8 ks[AES_BLOCK_SIZE];
|
||||
|
||||
AES_encrypt(walk.iv, ks, &ctx->enc.rk);
|
||||
if (tdst != tsrc)
|
||||
memcpy(tdst, tsrc, nbytes);
|
||||
crypto_xor(tdst, ks, nbytes);
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
|
||||
|
||||
/* generate the initial tweak */
|
||||
AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
|
||||
|
||||
while (walk.nbytes) {
|
||||
kernel_neon_begin();
|
||||
bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
|
||||
walk.nbytes, &ctx->enc, walk.iv);
|
||||
kernel_neon_end();
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
|
||||
|
||||
/* generate the initial tweak */
|
||||
AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
|
||||
|
||||
while (walk.nbytes) {
|
||||
kernel_neon_begin();
|
||||
bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
|
||||
walk.nbytes, &ctx->dec, walk.iv);
|
||||
kernel_neon_end();
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_alg aesbs_algs[] = { {
|
||||
.cra_name = "__cbc-aes-neonbs",
|
||||
.cra_driver_name = "__driver-cbc-aes-neonbs",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = aesbs_cbc_set_key,
|
||||
.encrypt = aesbs_cbc_encrypt,
|
||||
.decrypt = aesbs_cbc_decrypt,
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__ctr-aes-neonbs",
|
||||
.cra_driver_name = "__driver-ctr-aes-neonbs",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct aesbs_ctr_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = aesbs_ctr_set_key,
|
||||
.encrypt = aesbs_ctr_encrypt,
|
||||
.decrypt = aesbs_ctr_encrypt,
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__xts-aes-neonbs",
|
||||
.cra_driver_name = "__driver-xts-aes-neonbs",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct aesbs_xts_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_blkcipher = {
|
||||
.min_keysize = 2 * AES_MIN_KEY_SIZE,
|
||||
.max_keysize = 2 * AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = aesbs_xts_set_key,
|
||||
.encrypt = aesbs_xts_encrypt,
|
||||
.decrypt = aesbs_xts_decrypt,
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(aes)",
|
||||
.cra_driver_name = "cbc-aes-neonbs",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_ablkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = __ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
}
|
||||
}, {
|
||||
.cra_name = "ctr(aes)",
|
||||
.cra_driver_name = "ctr-aes-neonbs",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_ablkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
}
|
||||
}, {
|
||||
.cra_name = "xts(aes)",
|
||||
.cra_driver_name = "xts-aes-neonbs",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_ablkcipher = {
|
||||
.min_keysize = 2 * AES_MIN_KEY_SIZE,
|
||||
.max_keysize = 2 * AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
}
|
||||
} };
|
||||
|
||||
static int __init aesbs_mod_init(void)
|
||||
{
|
||||
if (!cpu_has_neon())
|
||||
return -ENODEV;
|
||||
|
||||
return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
|
||||
}
|
||||
|
||||
static void __exit aesbs_mod_exit(void)
|
||||
{
|
||||
crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
|
||||
}
|
||||
|
||||
module_init(aesbs_mod_init);
|
||||
module_exit(aesbs_mod_exit);
|
||||
|
||||
MODULE_DESCRIPTION("Bit sliced AES in CBC/CTR/XTS modes using NEON");
|
||||
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
|
||||
MODULE_LICENSE("GPL");
|
The diff for this file is not shown because it is too large.
|
@ -24,6 +24,7 @@ generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
generic-y += siginfo.h
generic-y += simd.h
generic-y += sizes.h
generic-y += socket.h
generic-y += sockios.h
@ -53,6 +53,13 @@
#define put_byte_3	lsl #0
#endif

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it
 */
@ -12,6 +12,7 @@
|
|||
#define __ASM_ARM_ATOMIC_H
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/prefetch.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/irqflags.h>
|
||||
#include <asm/barrier.h>
|
||||
|
@ -41,6 +42,7 @@ static inline void atomic_add(int i, atomic_t *v)
|
|||
unsigned long tmp;
|
||||
int result;
|
||||
|
||||
prefetchw(&v->counter);
|
||||
__asm__ __volatile__("@ atomic_add\n"
|
||||
"1: ldrex %0, [%3]\n"
|
||||
" add %0, %0, %4\n"
|
||||
|
@ -79,6 +81,7 @@ static inline void atomic_sub(int i, atomic_t *v)
|
|||
unsigned long tmp;
|
||||
int result;
|
||||
|
||||
prefetchw(&v->counter);
|
||||
__asm__ __volatile__("@ atomic_sub\n"
|
||||
"1: ldrex %0, [%3]\n"
|
||||
" sub %0, %0, %4\n"
|
||||
|
@ -260,6 +263,7 @@ static inline void atomic64_set(atomic64_t *v, long long i)
|
|||
{
|
||||
long long tmp;
|
||||
|
||||
prefetchw(&v->counter);
|
||||
__asm__ __volatile__("@ atomic64_set\n"
|
||||
"1: ldrexd %0, %H0, [%2]\n"
|
||||
" strexd %0, %3, %H3, [%2]\n"
|
||||
|
@ -276,10 +280,11 @@ static inline void atomic64_add(long long i, atomic64_t *v)
|
|||
long long result;
|
||||
unsigned long tmp;
|
||||
|
||||
prefetchw(&v->counter);
|
||||
__asm__ __volatile__("@ atomic64_add\n"
|
||||
"1: ldrexd %0, %H0, [%3]\n"
|
||||
" adds %0, %0, %4\n"
|
||||
" adc %H0, %H0, %H4\n"
|
||||
" adds %Q0, %Q0, %Q4\n"
|
||||
" adc %R0, %R0, %R4\n"
|
||||
" strexd %1, %0, %H0, [%3]\n"
|
||||
" teq %1, #0\n"
|
||||
" bne 1b"
|
||||
|
@ -297,8 +302,8 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
|
|||
|
||||
__asm__ __volatile__("@ atomic64_add_return\n"
|
||||
"1: ldrexd %0, %H0, [%3]\n"
|
||||
" adds %0, %0, %4\n"
|
||||
" adc %H0, %H0, %H4\n"
|
||||
" adds %Q0, %Q0, %Q4\n"
|
||||
" adc %R0, %R0, %R4\n"
|
||||
" strexd %1, %0, %H0, [%3]\n"
|
||||
" teq %1, #0\n"
|
||||
" bne 1b"
|
||||
|
@ -316,10 +321,11 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
|
|||
long long result;
|
||||
unsigned long tmp;
|
||||
|
||||
prefetchw(&v->counter);
|
||||
__asm__ __volatile__("@ atomic64_sub\n"
|
||||
"1: ldrexd %0, %H0, [%3]\n"
|
||||
" subs %0, %0, %4\n"
|
||||
" sbc %H0, %H0, %H4\n"
|
||||
" subs %Q0, %Q0, %Q4\n"
|
||||
" sbc %R0, %R0, %R4\n"
|
||||
" strexd %1, %0, %H0, [%3]\n"
|
||||
" teq %1, #0\n"
|
||||
" bne 1b"
|
||||
|
@ -337,8 +343,8 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
|
|||
|
||||
__asm__ __volatile__("@ atomic64_sub_return\n"
|
||||
"1: ldrexd %0, %H0, [%3]\n"
|
||||
" subs %0, %0, %4\n"
|
||||
" sbc %H0, %H0, %H4\n"
|
||||
" subs %Q0, %Q0, %Q4\n"
|
||||
" sbc %R0, %R0, %R4\n"
|
||||
" strexd %1, %0, %H0, [%3]\n"
|
||||
" teq %1, #0\n"
|
||||
" bne 1b"
|
||||
|
@ -406,9 +412,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
|
|||
|
||||
__asm__ __volatile__("@ atomic64_dec_if_positive\n"
|
||||
"1: ldrexd %0, %H0, [%3]\n"
|
||||
" subs %0, %0, #1\n"
|
||||
" sbc %H0, %H0, #0\n"
|
||||
" teq %H0, #0\n"
|
||||
" subs %Q0, %Q0, #1\n"
|
||||
" sbc %R0, %R0, #0\n"
|
||||
" teq %R0, #0\n"
|
||||
" bmi 2f\n"
|
||||
" strexd %1, %0, %H0, [%3]\n"
|
||||
" teq %1, #0\n"
|
||||
|
@ -437,8 +443,8 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
|
|||
" teqeq %H0, %H5\n"
|
||||
" moveq %1, #0\n"
|
||||
" beq 2f\n"
|
||||
" adds %0, %0, %6\n"
|
||||
" adc %H0, %H0, %H6\n"
|
||||
" adds %Q0, %Q0, %Q6\n"
|
||||
" adc %R0, %R0, %R6\n"
|
||||
" strexd %2, %0, %H0, [%4]\n"
|
||||
" teq %2, #0\n"
|
||||
" bne 1b\n"
|
||||
|
|
|
@@ -0,0 +1,77 @@
/*
 * arch/arm/include/asm/bL_switcher.h
 *
 * Created by:	Nicolas Pitre, April 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef ASM_BL_SWITCHER_H
#define ASM_BL_SWITCHER_H

#include <linux/compiler.h>
#include <linux/types.h>

typedef void (*bL_switch_completion_handler)(void *cookie);

int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
			 bL_switch_completion_handler completer,
			 void *completer_cookie);
static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
{
	return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
}

/*
 * Register here to be notified about runtime enabling/disabling of
 * the switcher.
 *
 * The notifier chain is called with the switcher activation lock held:
 * the switcher will not be enabled or disabled during callbacks.
 * Callbacks must not call bL_switcher_{get,put}_enabled().
 */
#define BL_NOTIFY_PRE_ENABLE	0
#define BL_NOTIFY_POST_ENABLE	1
#define BL_NOTIFY_PRE_DISABLE	2
#define BL_NOTIFY_POST_DISABLE	3

#ifdef CONFIG_BL_SWITCHER

int bL_switcher_register_notifier(struct notifier_block *nb);
int bL_switcher_unregister_notifier(struct notifier_block *nb);

/*
 * Use these functions to temporarily prevent enabling/disabling of
 * the switcher.
 * bL_switcher_get_enabled() returns true if the switcher is currently
 * enabled.  Each call to bL_switcher_get_enabled() must be followed
 * by a call to bL_switcher_put_enabled().  These functions are not
 * recursive.
 */
bool bL_switcher_get_enabled(void);
void bL_switcher_put_enabled(void);

int bL_switcher_trace_trigger(void);
int bL_switcher_get_logical_index(u32 mpidr);

#else
static inline int bL_switcher_register_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline bool bL_switcher_get_enabled(void) { return false; }
static inline void bL_switcher_put_enabled(void) { }
static inline int bL_switcher_trace_trigger(void) { return 0; }
static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
#endif /* CONFIG_BL_SWITCHER */

#endif

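For context, here is a minimal sketch of how a client of this new header might look. The notifier body, the target cluster number and the initcall wrapper are illustrative assumptions, not part of this series.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <asm/bL_switcher.h>

/* Hypothetical client: log switcher state changes, then request a switch. */
static int demo_bL_notify(struct notifier_block *nb, unsigned long event, void *p)
{
	if (event == BL_NOTIFY_PRE_DISABLE)
		pr_info("bL switcher about to be disabled\n");
	return NOTIFY_OK;
}

static struct notifier_block demo_bL_nb = {
	.notifier_call = demo_bL_notify,
};

static int __init demo_bL_init(void)
{
	bL_switcher_register_notifier(&demo_bL_nb);
	/* Hand logical CPU 0 over to cluster 1 (assumed to be the A7 cluster). */
	return bL_switch_request(0, 1);
}
late_initcall(demo_bL_init);
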
@@ -2,6 +2,8 @@
#define _ASMARM_BUG_H

#include <linux/linkage.h>
#include <linux/types.h>
#include <asm/opcodes.h>

#ifdef CONFIG_BUG

@@ -12,10 +14,10 @@
 */
#ifdef CONFIG_THUMB2_KERNEL
#define BUG_INSTR_VALUE 0xde02
#define BUG_INSTR_TYPE ".hword "
#define BUG_INSTR(__value) __inst_thumb16(__value)
#else
#define BUG_INSTR_VALUE 0xe7f001f2
#define BUG_INSTR_TYPE ".word "
#define BUG_INSTR(__value) __inst_arm(__value)
#endif

@@ -33,7 +35,7 @@
#define __BUG(__file, __line, __value)				\
do {								\
	asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n"	\
	asm volatile("1:\t" BUG_INSTR(__value) "\n"		\
		".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
		"2:\t.asciz " #__file "\n"			\
		".popsection\n"					\

@@ -48,7 +50,7 @@ do {								\
#define __BUG(__file, __line, __value)				\
do {								\
	asm volatile(BUG_INSTR_TYPE #__value);			\
	asm volatile(BUG_INSTR(__value) "\n");			\
	unreachable();						\
} while (0)
#endif  /* CONFIG_DEBUG_BUGVERBOSE */

@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>

#define NR_IPI	7
#define NR_IPI	8

typedef struct {
	unsigned int __softirq_pending;

@ -24,8 +24,8 @@
|
|||
#define TRACER_TIMEOUT 10000
|
||||
|
||||
#define etm_writel(t, v, x) \
|
||||
(__raw_writel((v), (t)->etm_regs + (x)))
|
||||
#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
|
||||
(writel_relaxed((v), (t)->etm_regs + (x)))
|
||||
#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
|
||||
|
||||
/* CoreSight Management Registers */
|
||||
#define CSMR_LOCKACCESS 0xfb0
|
||||
|
@ -142,8 +142,8 @@
|
|||
#define ETBFF_TRIGFL BIT(10)
|
||||
|
||||
#define etb_writel(t, v, x) \
|
||||
(__raw_writel((v), (t)->etb_regs + (x)))
|
||||
#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
|
||||
(writel_relaxed((v), (t)->etb_regs + (x)))
|
||||
#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
|
||||
|
||||
#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
|
||||
#define etm_unlock(t) \
|
||||
|
|
|
@@ -11,6 +11,7 @@
#define __ARM_KGDB_H__

#include <linux/ptrace.h>
#include <asm/opcodes.h>

/*
 * GDB assumes that we're a user process being debugged, so

@@ -41,7 +42,7 @@
static inline void arch_kgdb_breakpoint(void)
{
	asm(".word 0xe7ffdeff");
	asm(__inst_arm(0xe7ffdeff));
}

extern void kgdb_handle_bus_error(void);

@@ -49,6 +49,7 @@ struct machine_desc {
	bool			(*smp_init)(void);
	void			(*fixup)(struct tag *, char **,
					 struct meminfo *);
	void			(*init_meminfo)(void);
	void			(*reserve)(void);/* reserve mem blocks	*/
	void			(*map_io)(void);/* IO mapping function	*/
	void			(*init_early)(void);

@@ -41,6 +41,14 @@ extern void mcpm_entry_point(void);
 */
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);

/*
 * This sets an early poke, i.e. a value to be poked into some address
 * from very early assembly code before the CPU is ungated.  The
 * address must be physical, and if 0 then nothing will happen.
 */
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
			 unsigned long poke_phys_addr, unsigned long poke_val);

/*
 * CPU/cluster power operations API for higher subsystems to use.
 */

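As a rough illustration of how the early poke pairs with the entry vector, a platform's CPU bring-up path might do something like the sketch below. The mailbox address and value are made-up placeholders; only the two mcpm_set_* calls come from this header.

#include <asm/mcpm.h>

static void example_boot_secondary(unsigned int cpu, unsigned int cluster)
{
	extern void secondary_startup(void);

	/* Where the CPU should continue once the MCPM entry code releases it. */
	mcpm_set_entry_vector(cpu, cluster, secondary_startup);

	/*
	 * Assumed platform detail: firmware parks the CPU until a value is
	 * written to a mailbox; let the early assembly perform that poke.
	 */
	mcpm_set_early_poke(cpu, cluster, 0x2c000030 /* made-up mailbox */, 1);
}
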
@@ -172,8 +172,13 @@
 * so that all we need to do is modify the 8-bit constant field.
 */
#define __PV_BITS_31_24	0x81000000
#define __PV_BITS_7_0	0x81

extern u64 __pv_phys_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;

extern unsigned long __pv_phys_offset;
#define PHYS_OFFSET __pv_phys_offset

#define __pv_stub(from,to,instr,type)	\

@ -185,22 +190,58 @@ extern unsigned long __pv_phys_offset;
|
|||
: "=r" (to) \
|
||||
: "r" (from), "I" (type))
|
||||
|
||||
static inline unsigned long __virt_to_phys(unsigned long x)
|
||||
#define __pv_stub_mov_hi(t) \
|
||||
__asm__ volatile("@ __pv_stub_mov\n" \
|
||||
"1: mov %R0, %1\n" \
|
||||
" .pushsection .pv_table,\"a\"\n" \
|
||||
" .long 1b\n" \
|
||||
" .popsection\n" \
|
||||
: "=r" (t) \
|
||||
: "I" (__PV_BITS_7_0))
|
||||
|
||||
#define __pv_add_carry_stub(x, y) \
|
||||
__asm__ volatile("@ __pv_add_carry_stub\n" \
|
||||
"1: adds %Q0, %1, %2\n" \
|
||||
" adc %R0, %R0, #0\n" \
|
||||
" .pushsection .pv_table,\"a\"\n" \
|
||||
" .long 1b\n" \
|
||||
" .popsection\n" \
|
||||
: "+r" (y) \
|
||||
: "r" (x), "I" (__PV_BITS_31_24) \
|
||||
: "cc")
|
||||
|
||||
static inline phys_addr_t __virt_to_phys(unsigned long x)
|
||||
{
|
||||
unsigned long t;
|
||||
__pv_stub(x, t, "add", __PV_BITS_31_24);
|
||||
phys_addr_t t;
|
||||
|
||||
if (sizeof(phys_addr_t) == 4) {
|
||||
__pv_stub(x, t, "add", __PV_BITS_31_24);
|
||||
} else {
|
||||
__pv_stub_mov_hi(t);
|
||||
__pv_add_carry_stub(x, t);
|
||||
}
|
||||
return t;
|
||||
}
|
||||
|
||||
static inline unsigned long __phys_to_virt(unsigned long x)
|
||||
static inline unsigned long __phys_to_virt(phys_addr_t x)
|
||||
{
|
||||
unsigned long t;
|
||||
__pv_stub(x, t, "sub", __PV_BITS_31_24);
|
||||
return t;
|
||||
}
|
||||
|
||||
#else
|
||||
#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
|
||||
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
|
||||
|
||||
static inline phys_addr_t __virt_to_phys(unsigned long x)
|
||||
{
|
||||
return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
|
||||
}
|
||||
|
||||
static inline unsigned long __phys_to_virt(phys_addr_t x)
|
||||
{
|
||||
return x - PHYS_OFFSET + PAGE_OFFSET;
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
@@ -238,16 +279,33 @@ static inline phys_addr_t virt_to_phys(const volatile void *x)

static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)(__phys_to_virt((unsigned long)(x)));
	return (void *)__phys_to_virt(x);
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((unsigned long)(x)))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);

/*
 * These are for systems that have a hardware interconnect supported alias of
 * physical memory for idmap purposes.  Most cases should leave these
 * untouched.
 */
static inline phys_addr_t __virt_to_idmap(unsigned long x)
{
	if (arch_virt_to_idmap)
		return arch_virt_to_idmap(x);
	else
		return __virt_to_phys(x);
}

#define virt_to_idmap(x)	__virt_to_idmap((unsigned long)(x))

/*
 * Virtual <-> DMA view memory address translations
 * Again, these are *only* valid on the kernel direct mapped RAM

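One practical consequence of __virt_to_phys() now returning phys_addr_t is that physical addresses may no longer fit in an unsigned long on LPAE kernels. A small sketch of handling that correctly (the function name is illustrative):

#include <linux/printk.h>
#include <asm/memory.h>

static void example_show_phys(void *kva)
{
	phys_addr_t pa = __pa(kva);	/* may be a 64-bit value on LPAE */

	/* %pa prints a phys_addr_t at its native width; avoid casting to long. */
	pr_info("virt %p -> phys %pa\n", kva, &pa);
}
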
@@ -16,7 +16,7 @@ typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS	8
#define ASID_MASK	((~0ULL) << ASID_BITS)
#define ASID(mm)	((mm)->context.id.counter & ~ASID_MASK)
#define ASID(mm)	((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
#else
#define ASID(mm)	(0)
#endif

@@ -22,6 +22,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>

#ifdef __KERNEL__
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \

@@ -87,6 +88,17 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp

#ifdef CONFIG_SMP
#define __ALT_SMP_ASM(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"
#else
#define __ALT_SMP_ASM(smp, up)	up
#endif

/*
 * Prefetching support - only ARMv5.
 */

@@ -97,17 +109,22 @@ static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:
		: "p" (ptr)
		: "cc");
		:: "p" (ptr));
}

#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
#define prefetchw(ptr)	prefetch(ptr)

#define ARCH_HAS_SPINLOCK_PREFETCH
#define spin_lock_prefetch(x) do { } while (0)

static inline void prefetchw(const void *ptr)
{
	__asm__ __volatile__(
		".arch_extension	mp\n"
		__ALT_SMP_ASM(
			WASM(pldw)		"\t%a0",
			WASM(pld)		"\t%a0"
		)
		:: "p" (ptr));
}
#endif
#endif

#define HAVE_ARCH_PICK_MMAP_LAYOUT

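The net effect for callers is that prefetchw() becomes a real hint on ARMv7 SMP (a PLDW, i.e. preload with intent to write, patched back to a plain PLD on UP kernels). A sketch of the intended usage pattern; the structure and function are purely illustrative:

#include <linux/prefetch.h>

struct counter {
	unsigned long value;
};

static void example_bump(struct counter *c)
{
	/*
	 * Hint that we are about to write this line: on ARMv7 SMP this
	 * emits pldw, on UP the instruction is patched to a plain pld.
	 */
	prefetchw(&c->value);

	c->value++;		/* the actual store follows shortly */
}
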
@@ -84,6 +84,8 @@ extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);

extern int register_ipi_completion(struct completion *completion, int cpu);

struct smp_operations {
#ifdef CONFIG_SMP
	/*

@@ -5,21 +5,13 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>
#include <linux/prefetch.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by

@@ -31,17 +23,18 @@
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
#define WFE(cond)	__ALT_SMP_ASM(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif

#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))

static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7

@ -77,6 +70,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
|
|||
u32 newval;
|
||||
arch_spinlock_t lockval;
|
||||
|
||||
prefetchw(&lock->slock);
|
||||
__asm__ __volatile__(
|
||||
"1: ldrex %0, [%3]\n"
|
||||
" add %1, %0, %4\n"
|
||||
|
@ -100,6 +94,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
|
|||
unsigned long contended, res;
|
||||
u32 slock;
|
||||
|
||||
prefetchw(&lock->slock);
|
||||
do {
|
||||
__asm__ __volatile__(
|
||||
" ldrex %0, [%3]\n"
|
||||
|
@ -156,6 +151,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
|
|||
{
|
||||
unsigned long tmp;
|
||||
|
||||
prefetchw(&rw->lock);
|
||||
__asm__ __volatile__(
|
||||
"1: ldrex %0, [%1]\n"
|
||||
" teq %0, #0\n"
|
||||
|
@ -174,6 +170,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
|
|||
{
|
||||
unsigned long contended, res;
|
||||
|
||||
prefetchw(&rw->lock);
|
||||
do {
|
||||
__asm__ __volatile__(
|
||||
" ldrex %0, [%2]\n"
|
||||
|
@ -207,7 +204,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
|
|||
}
|
||||
|
||||
/* write_can_lock - would write_trylock() succeed? */
|
||||
#define arch_write_can_lock(x) ((x)->lock == 0)
|
||||
#define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0)
|
||||
|
||||
/*
|
||||
* Read locks are a bit more hairy:
|
||||
|
@ -225,6 +222,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
|
|||
{
|
||||
unsigned long tmp, tmp2;
|
||||
|
||||
prefetchw(&rw->lock);
|
||||
__asm__ __volatile__(
|
||||
"1: ldrex %0, [%2]\n"
|
||||
" adds %0, %0, #1\n"
|
||||
|
@ -245,6 +243,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
|
|||
|
||||
smp_mb();
|
||||
|
||||
prefetchw(&rw->lock);
|
||||
__asm__ __volatile__(
|
||||
"1: ldrex %0, [%2]\n"
|
||||
" sub %0, %0, #1\n"
|
||||
|
@ -263,6 +262,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
|
|||
{
|
||||
unsigned long contended, res;
|
||||
|
||||
prefetchw(&rw->lock);
|
||||
do {
|
||||
__asm__ __volatile__(
|
||||
" ldrex %0, [%2]\n"
|
||||
|
@ -284,7 +284,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
|
|||
}
|
||||
|
||||
/* read_can_lock - would read_trylock() succeed? */
|
||||
#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
|
||||
#define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000)
|
||||
|
||||
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
|
||||
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
|
||||
|
|
|
@@ -25,7 +25,7 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }

typedef struct {
	volatile unsigned int lock;
	u32 lock;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }

@@ -38,6 +38,8 @@
#ifdef __ASSEMBLY__
#define W(instr)	instr.w
#define BSYM(sym)	sym + 1
#else
#define WASM(instr)	#instr ".w"
#endif

#else	/* !CONFIG_THUMB2_KERNEL */

@@ -50,6 +52,8 @@
#ifdef __ASSEMBLY__
#define W(instr)	instr
#define BSYM(sym)	sym
#else
#define WASM(instr)	#instr
#endif

#endif	/* CONFIG_THUMB2_KERNEL */

@ -25,12 +25,14 @@
|
|||
|
||||
.macro waituart,rd,rx
|
||||
1001: ldr \rd, [\rx, #UART01x_FR]
|
||||
ARM_BE8( rev \rd, \rd )
|
||||
tst \rd, #UART01x_FR_TXFF
|
||||
bne 1001b
|
||||
.endm
|
||||
|
||||
.macro busyuart,rd,rx
|
||||
1001: ldr \rd, [\rx, #UART01x_FR]
|
||||
ARM_BE8( rev \rd, \rd )
|
||||
tst \rd, #UART01x_FR_BUSY
|
||||
bne 1001b
|
||||
.endm
|
||||
|
|
|
@@ -7,6 +7,7 @@ header-y += hwcap.h
header-y += ioctls.h
header-y += kvm_para.h
header-y += mman.h
header-y += perf_regs.h
header-y += posix_types.h
header-y += ptrace.h
header-y += setup.h

@@ -0,0 +1,23 @@
#ifndef _ASM_ARM_PERF_REGS_H
#define _ASM_ARM_PERF_REGS_H

enum perf_event_arm_regs {
	PERF_REG_ARM_R0,
	PERF_REG_ARM_R1,
	PERF_REG_ARM_R2,
	PERF_REG_ARM_R3,
	PERF_REG_ARM_R4,
	PERF_REG_ARM_R5,
	PERF_REG_ARM_R6,
	PERF_REG_ARM_R7,
	PERF_REG_ARM_R8,
	PERF_REG_ARM_R9,
	PERF_REG_ARM_R10,
	PERF_REG_ARM_FP,
	PERF_REG_ARM_IP,
	PERF_REG_ARM_SP,
	PERF_REG_ARM_LR,
	PERF_REG_ARM_PC,
	PERF_REG_ARM_MAX,
};
#endif /* _ASM_ARM_PERF_REGS_H */

@@ -17,7 +17,8 @@ CFLAGS_REMOVE_return_address.o = -pg

obj-y		:= elf.o entry-common.o irq.o opcodes.o \
		   process.o ptrace.o return_address.o \
		   setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
		   setup.o signal.o sigreturn_codes.o \
		   stacktrace.o sys_arm.o time.o traps.o

obj-$(CONFIG_ATAGS)		+= atags_parse.o
obj-$(CONFIG_ATAGS_PROC)	+= atags_proc.o

@@ -78,6 +79,7 @@ obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o
obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o
obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o perf_event_cpu.o
AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY)	+= topology.o

@@ -155,4 +155,5 @@ EXPORT_SYMBOL(__gnu_mcount_nc);

#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
EXPORT_SYMBOL(__pv_phys_offset);
EXPORT_SYMBOL(__pv_offset);
#endif

@@ -416,9 +416,8 @@ __und_usr:
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r0, r0				@ little endian instruction
#endif
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction

@@ -393,9 +393,7 @@ ENTRY(vector_swi)
#else
 USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

@ -77,6 +77,7 @@
|
|||
|
||||
__HEAD
|
||||
ENTRY(stext)
|
||||
ARM_BE8(setend be ) @ ensure we are in BE8 mode
|
||||
|
||||
THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
|
||||
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
|
||||
|
@ -352,6 +353,9 @@ ENTRY(secondary_startup)
|
|||
* the processor type - there is no need to check the machine type
|
||||
* as it has already been validated by the primary processor.
|
||||
*/
|
||||
|
||||
ARM_BE8(setend be) @ ensure we are in BE8 mode
|
||||
|
||||
#ifdef CONFIG_ARM_VIRT_EXT
|
||||
bl __hyp_stub_install_secondary
|
||||
#endif
|
||||
|
@ -555,6 +559,14 @@ ENTRY(fixup_smp)
|
|||
ldmfd sp!, {r4 - r6, pc}
|
||||
ENDPROC(fixup_smp)
|
||||
|
||||
#ifdef __ARMEB__
|
||||
#define LOW_OFFSET 0x4
|
||||
#define HIGH_OFFSET 0x0
|
||||
#else
|
||||
#define LOW_OFFSET 0x0
|
||||
#define HIGH_OFFSET 0x4
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
|
||||
|
||||
/* __fixup_pv_table - patch the stub instructions with the delta between
|
||||
|
@ -565,17 +577,20 @@ ENDPROC(fixup_smp)
|
|||
__HEAD
|
||||
__fixup_pv_table:
|
||||
adr r0, 1f
|
||||
ldmia r0, {r3-r5, r7}
|
||||
sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
|
||||
ldmia r0, {r3-r7}
|
||||
mvn ip, #0
|
||||
subs r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
|
||||
add r4, r4, r3 @ adjust table start address
|
||||
add r5, r5, r3 @ adjust table end address
|
||||
add r7, r7, r3 @ adjust __pv_phys_offset address
|
||||
str r8, [r7] @ save computed PHYS_OFFSET to __pv_phys_offset
|
||||
add r6, r6, r3 @ adjust __pv_phys_offset address
|
||||
add r7, r7, r3 @ adjust __pv_offset address
|
||||
str r8, [r6, #LOW_OFFSET] @ save computed PHYS_OFFSET to __pv_phys_offset
|
||||
strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits
|
||||
mov r6, r3, lsr #24 @ constant for add/sub instructions
|
||||
teq r3, r6, lsl #24 @ must be 16MiB aligned
|
||||
THUMB( it ne @ cross section branch )
|
||||
bne __error
|
||||
str r6, [r7, #4] @ save to __pv_offset
|
||||
str r3, [r7, #LOW_OFFSET] @ save to __pv_offset low bits
|
||||
b __fixup_a_pv_table
|
||||
ENDPROC(__fixup_pv_table)
|
||||
|
||||
|
@ -584,10 +599,19 @@ ENDPROC(__fixup_pv_table)
|
|||
.long __pv_table_begin
|
||||
.long __pv_table_end
|
||||
2: .long __pv_phys_offset
|
||||
.long __pv_offset
|
||||
|
||||
.text
|
||||
__fixup_a_pv_table:
|
||||
adr r0, 3f
|
||||
ldr r6, [r0]
|
||||
add r6, r6, r3
|
||||
ldr r0, [r6, #HIGH_OFFSET] @ pv_offset high word
|
||||
ldr r6, [r6, #LOW_OFFSET] @ pv_offset low word
|
||||
mov r6, r6, lsr #24
|
||||
cmn r0, #1
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
moveq r0, #0x200000 @ set bit 21, mov to mvn instruction
|
||||
lsls r6, #24
|
||||
beq 2f
|
||||
clz r7, r6
|
||||
|
@ -601,18 +625,42 @@ __fixup_a_pv_table:
|
|||
b 2f
|
||||
1: add r7, r3
|
||||
ldrh ip, [r7, #2]
|
||||
and ip, 0x8f00
|
||||
orr ip, r6 @ mask in offset bits 31-24
|
||||
ARM_BE8(rev16 ip, ip)
|
||||
tst ip, #0x4000
|
||||
and ip, #0x8f00
|
||||
orrne ip, r6 @ mask in offset bits 31-24
|
||||
orreq ip, r0 @ mask in offset bits 7-0
|
||||
ARM_BE8(rev16 ip, ip)
|
||||
strh ip, [r7, #2]
|
||||
bne 2f
|
||||
ldrh ip, [r7]
|
||||
ARM_BE8(rev16 ip, ip)
|
||||
bic ip, #0x20
|
||||
orr ip, ip, r0, lsr #16
|
||||
ARM_BE8(rev16 ip, ip)
|
||||
strh ip, [r7]
|
||||
2: cmp r4, r5
|
||||
ldrcc r7, [r4], #4 @ use branch for delay slot
|
||||
bcc 1b
|
||||
bx lr
|
||||
#else
|
||||
moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
|
||||
b 2f
|
||||
1: ldr ip, [r7, r3]
|
||||
#ifdef CONFIG_CPU_ENDIAN_BE8
|
||||
@ in BE8, we load data in BE, but instructions still in LE
|
||||
bic ip, ip, #0xff000000
|
||||
tst ip, #0x000f0000 @ check the rotation field
|
||||
orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
|
||||
biceq ip, ip, #0x00004000 @ clear bit 22
|
||||
orreq ip, ip, r0, lsl #24 @ mask in offset bits 7-0
|
||||
#else
|
||||
bic ip, ip, #0x000000ff
|
||||
orr ip, ip, r6 @ mask in offset bits 31-24
|
||||
tst ip, #0xf00 @ check the rotation field
|
||||
orrne ip, ip, r6 @ mask in offset bits 31-24
|
||||
biceq ip, ip, #0x400000 @ clear bit 22
|
||||
orreq ip, ip, r0 @ mask in offset bits 7-0
|
||||
#endif
|
||||
str ip, [r7, r3]
|
||||
2: cmp r4, r5
|
||||
ldrcc r7, [r4], #4 @ use branch for delay slot
|
||||
|
@ -621,28 +669,30 @@ __fixup_a_pv_table:
|
|||
#endif
|
||||
ENDPROC(__fixup_a_pv_table)
|
||||
|
||||
.align
|
||||
3: .long __pv_offset
|
||||
|
||||
ENTRY(fixup_pv_table)
|
||||
stmfd sp!, {r4 - r7, lr}
|
||||
ldr r2, 2f @ get address of __pv_phys_offset
|
||||
mov r3, #0 @ no offset
|
||||
mov r4, r0 @ r0 = table start
|
||||
add r5, r0, r1 @ r1 = table size
|
||||
ldr r6, [r2, #4] @ get __pv_offset
|
||||
bl __fixup_a_pv_table
|
||||
ldmfd sp!, {r4 - r7, pc}
|
||||
ENDPROC(fixup_pv_table)
|
||||
|
||||
.align
|
||||
2: .long __pv_phys_offset
|
||||
|
||||
.data
|
||||
.globl __pv_phys_offset
|
||||
.type __pv_phys_offset, %object
|
||||
__pv_phys_offset:
|
||||
.long 0
|
||||
.size __pv_phys_offset, . - __pv_phys_offset
|
||||
.quad 0
|
||||
.size __pv_phys_offset, . -__pv_phys_offset
|
||||
|
||||
.globl __pv_offset
|
||||
.type __pv_offset, %object
|
||||
__pv_offset:
|
||||
.long 0
|
||||
.quad 0
|
||||
.size __pv_offset, . -__pv_offset
|
||||
#endif
|
||||
|
||||
#include "head-common.S"
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
#include <asm/sections.h>
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/unwind.h>
|
||||
#include <asm/opcodes.h>
|
||||
|
||||
#ifdef CONFIG_XIP_KERNEL
|
||||
/*
|
||||
|
@ -60,6 +61,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
|
|||
Elf32_Sym *sym;
|
||||
const char *symname;
|
||||
s32 offset;
|
||||
u32 tmp;
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
u32 upper, lower, sign, j1, j2;
|
||||
#endif
|
||||
|
@ -95,7 +97,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
|
|||
case R_ARM_PC24:
|
||||
case R_ARM_CALL:
|
||||
case R_ARM_JUMP24:
|
||||
offset = (*(u32 *)loc & 0x00ffffff) << 2;
|
||||
offset = __mem_to_opcode_arm(*(u32 *)loc);
|
||||
offset = (offset & 0x00ffffff) << 2;
|
||||
if (offset & 0x02000000)
|
||||
offset -= 0x04000000;
|
||||
|
||||
|
@ -111,9 +114,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
|
|||
}
|
||||
|
||||
offset >>= 2;
|
||||
offset &= 0x00ffffff;
|
||||
|
||||
*(u32 *)loc &= 0xff000000;
|
||||
*(u32 *)loc |= offset & 0x00ffffff;
|
||||
*(u32 *)loc &= __opcode_to_mem_arm(0xff000000);
|
||||
*(u32 *)loc |= __opcode_to_mem_arm(offset);
|
||||
break;
|
||||
|
||||
case R_ARM_V4BX:
|
||||
|
@ -121,8 +125,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
|
|||
* other bits to re-code instruction as
|
||||
* MOV PC,Rm.
|
||||
*/
|
||||
*(u32 *)loc &= 0xf000000f;
|
||||
*(u32 *)loc |= 0x01a0f000;
|
||||
*(u32 *)loc &= __opcode_to_mem_arm(0xf000000f);
|
||||
*(u32 *)loc |= __opcode_to_mem_arm(0x01a0f000);
|
||||
break;
|
||||
|
||||
case R_ARM_PREL31:
|
||||
|
@ -132,7 +136,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
|
|||
|
||||
case R_ARM_MOVW_ABS_NC:
|
||||
case R_ARM_MOVT_ABS:
|
||||
offset = *(u32 *)loc;
|
||||
offset = tmp = __mem_to_opcode_arm(*(u32 *)loc);
|
||||
offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
|
||||
offset = (offset ^ 0x8000) - 0x8000;
|
||||
|
||||
|
@ -140,16 +144,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
|
|||
if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
|
||||
offset >>= 16;
|
||||
|
||||
*(u32 *)loc &= 0xfff0f000;
|
||||
*(u32 *)loc |= ((offset & 0xf000) << 4) |
|
||||
(offset & 0x0fff);
|
||||
tmp &= 0xfff0f000;
|
||||
tmp |= ((offset & 0xf000) << 4) |
|
||||
(offset & 0x0fff);
|
||||
|
||||
*(u32 *)loc = __opcode_to_mem_arm(tmp);
|
||||
break;
|
||||
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
case R_ARM_THM_CALL:
|
||||
case R_ARM_THM_JUMP24:
|
||||
upper = *(u16 *)loc;
|
||||
lower = *(u16 *)(loc + 2);
|
||||
upper = __mem_to_opcode_thumb16(*(u16 *)loc);
|
||||
lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
|
||||
|
||||
/*
|
||||
* 25 bit signed address range (Thumb-2 BL and B.W
|
||||
|
@ -198,17 +204,20 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
|
|||
sign = (offset >> 24) & 1;
|
||||
j1 = sign ^ (~(offset >> 23) & 1);
|
||||
j2 = sign ^ (~(offset >> 22) & 1);
|
||||
*(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
|
||||
upper = (u16)((upper & 0xf800) | (sign << 10) |
|
||||
((offset >> 12) & 0x03ff));
|
||||
*(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
|
||||
(j1 << 13) | (j2 << 11) |
|
||||
((offset >> 1) & 0x07ff));
|
||||
lower = (u16)((lower & 0xd000) |
|
||||
(j1 << 13) | (j2 << 11) |
|
||||
((offset >> 1) & 0x07ff));
|
||||
|
||||
*(u16 *)loc = __opcode_to_mem_thumb16(upper);
|
||||
*(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
|
||||
break;
|
||||
|
||||
case R_ARM_THM_MOVW_ABS_NC:
|
||||
case R_ARM_THM_MOVT_ABS:
|
||||
upper = *(u16 *)loc;
|
||||
lower = *(u16 *)(loc + 2);
|
||||
upper = __mem_to_opcode_thumb16(*(u16 *)loc);
|
||||
lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
|
||||
|
||||
/*
|
||||
* MOVT/MOVW instructions encoding in Thumb-2:
|
||||
|
@ -229,12 +238,14 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
|
|||
if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
|
||||
offset >>= 16;
|
||||
|
||||
*(u16 *)loc = (u16)((upper & 0xfbf0) |
|
||||
((offset & 0xf000) >> 12) |
|
||||
((offset & 0x0800) >> 1));
|
||||
*(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
|
||||
((offset & 0x0700) << 4) |
|
||||
(offset & 0x00ff));
|
||||
upper = (u16)((upper & 0xfbf0) |
|
||||
((offset & 0xf000) >> 12) |
|
||||
((offset & 0x0800) >> 1));
|
||||
lower = (u16)((lower & 0x8f00) |
|
||||
((offset & 0x0700) << 4) |
|
||||
(offset & 0x00ff));
|
||||
*(u16 *)loc = __opcode_to_mem_thumb16(upper);
|
||||
*(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
|
||||
break;
|
||||
#endif
|
||||
|
||||
|
|
|
@@ -256,12 +256,11 @@ validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (is_software_event(event))
		return 1;

	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)

@@ -0,0 +1,30 @@

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <asm/perf_regs.h>
#include <asm/ptrace.h>

u64 perf_reg_value(struct pt_regs *regs, int idx)
{
	if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM_MAX))
		return 0;

	return regs->uregs[idx];
}

#define REG_RESERVED (~((1ULL << PERF_REG_ARM_MAX) - 1))

int perf_reg_validate(u64 mask)
{
	if (!mask || mask & REG_RESERVED)
		return -EINVAL;

	return 0;
}

u64 perf_reg_abi(struct task_struct *task)
{
	return PERF_SAMPLE_REGS_ABI_32;
}

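With HAVE_PERF_REGS wired up like this, user space can request raw register values with each sample, roughly along the following lines. The event choice and the register mask are arbitrary examples; the attr field names come from the existing perf_event_attr ABI.

#include <string.h>
#include <linux/perf_event.h>
#include <asm/perf_regs.h>

static void example_setup_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER;
	attr->sample_regs_user = (1ULL << PERF_REG_ARM_PC) |
				 (1ULL << PERF_REG_ARM_SP) |
				 (1ULL << PERF_REG_ARM_LR);
	/* perf_reg_validate() above rejects masks with bits >= PERF_REG_ARM_MAX. */
}
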
@@ -73,6 +73,8 @@ __setup("fpe=", fpe_setup);
#endif

extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
			      struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

@@ -888,6 +890,8 @@ void __init setup_arch(char **cmdline_p)
	parse_early_param();

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

@@ -21,29 +21,7 @@
#include <asm/unistd.h>
#include <asm/vfp.h>

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))

/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

static const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};
extern const unsigned long sigreturn_codes[7];

static unsigned long signal_return_offset;

@@ -0,0 +1,80 @@
/*
 * sigreturn_codes.S - code snippets for sigreturn syscalls
 *
 * Created by:	Victor Kamensky, 2013-08-13
 * Copyright:	(C) 2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <asm/unistd.h>

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 * With EABI, the syscall number has to be loaded into r7.  As a result,
 * the ARM syscall sequence snippet consists of a move and an svc in
 * .arm encoding.
 *
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions in .thumb encoding.
 *
 * Please note that these sigreturn_codes snippets are not executed in
 * place.  Instead they are copied by the kernel into appropriate places.
 * Code inside of arch/arm/kernel/signal.c is very sensitive to the layout
 * of these code snippets.
 */

#if __LINUX_ARM_ARCH__ <= 4
	/*
	 * Note we manually set minimally required arch that supports
	 * required thumb opcodes for early arch versions. It is OK
	 * for this file to be used in combination with other
	 * lower arch variants, since these code snippets are only
	 * used as input data.
	 */
	.arch armv4t
#endif

	.section .rodata
	.global sigreturn_codes
	.type	sigreturn_codes, #object

	.arm

sigreturn_codes:

	/* ARM sigreturn syscall code snippet */
	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
	swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)

	/* Thumb sigreturn syscall code snippet */
	.thumb
	movs	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
	swi	#0

	/* ARM sigreturn_rt syscall code snippet */
	.arm
	mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
	swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)

	/* Thumb sigreturn_rt syscall code snippet */
	.thumb
	movs	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
	swi	#0

	/*
	 * Note on additional space: setup_return in signal.c always
	 * copies two words regardless of whether it is the thumb case
	 * or not, so we need an additional word after the real last
	 * entry.
	 */
	.arm
	.space	4

	.size	sigreturn_codes, . - sigreturn_codes

@ -55,6 +55,7 @@
|
|||
* specific registers and some other data for resume.
|
||||
* r0 = suspend function arg0
|
||||
* r1 = suspend function
|
||||
* r2 = MPIDR value the resuming CPU will use
|
||||
*/
|
||||
ENTRY(__cpu_suspend)
|
||||
stmfd sp!, {r4 - r11, lr}
|
||||
|
@ -67,23 +68,18 @@ ENTRY(__cpu_suspend)
|
|||
mov r5, sp @ current virtual SP
|
||||
add r4, r4, #12 @ Space for pgd, virt sp, phys resume fn
|
||||
sub sp, sp, r4 @ allocate CPU state on stack
|
||||
stmfd sp!, {r0, r1} @ save suspend func arg and pointer
|
||||
add r0, sp, #8 @ save pointer to save block
|
||||
mov r1, r4 @ size of save block
|
||||
mov r2, r5 @ virtual SP
|
||||
ldr r3, =sleep_save_sp
|
||||
stmfd sp!, {r0, r1} @ save suspend func arg and pointer
|
||||
ldr r3, [r3, #SLEEP_SAVE_SP_VIRT]
|
||||
ALT_SMP(mrc p15, 0, r9, c0, c0, 5)
|
||||
ALT_UP_B(1f)
|
||||
ldr r8, =mpidr_hash
|
||||
/*
|
||||
* This ldmia relies on the memory layout of the mpidr_hash
|
||||
* struct mpidr_hash.
|
||||
*/
|
||||
ldmia r8, {r4-r7} @ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts
|
||||
compute_mpidr_hash lr, r5, r6, r7, r9, r4
|
||||
add r3, r3, lr, lsl #2
|
||||
1:
|
||||
ALT_SMP(ldr r0, =mpidr_hash)
|
||||
ALT_UP_B(1f)
|
||||
/* This ldmia relies on the memory layout of the mpidr_hash struct */
|
||||
ldmia r0, {r1, r6-r8} @ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts
|
||||
compute_mpidr_hash r0, r6, r7, r8, r2, r1
|
||||
add r3, r3, r0, lsl #2
|
||||
1: mov r2, r5 @ virtual SP
|
||||
mov r1, r4 @ size of save block
|
||||
add r0, sp, #8 @ pointer to save block
|
||||
bl __cpu_suspend_save
|
||||
adr lr, BSYM(cpu_suspend_abort)
|
||||
ldmfd sp!, {r0, pc} @ call suspend fn
|
||||
|
@ -130,6 +126,7 @@ ENDPROC(cpu_resume_after_mmu)
|
|||
.data
|
||||
.align
|
||||
ENTRY(cpu_resume)
|
||||
ARM_BE8(setend be) @ ensure we are in BE mode
|
||||
mov r1, #0
|
||||
ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
|
||||
ALT_UP_B(1f)
|
||||
|
|
|
@ -68,6 +68,7 @@ enum ipi_msg_type {
|
|||
IPI_CALL_FUNC_SINGLE,
|
||||
IPI_CPU_STOP,
|
||||
IPI_IRQ_WORK,
|
||||
IPI_COMPLETION,
|
||||
};
|
||||
|
||||
static DECLARE_COMPLETION(cpu_running);
|
||||
|
@ -82,7 +83,7 @@ void __init smp_set_ops(struct smp_operations *ops)
|
|||
|
||||
static unsigned long get_arch_pgd(pgd_t *pgd)
|
||||
{
|
||||
phys_addr_t pgdir = virt_to_phys(pgd);
|
||||
phys_addr_t pgdir = virt_to_idmap(pgd);
|
||||
BUG_ON(pgdir & ARCH_PGD_MASK);
|
||||
return pgdir >> ARCH_PGD_SHIFT;
|
||||
}
|
||||
|
@ -467,6 +468,7 @@ static const char *ipi_types[NR_IPI] = {
|
|||
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
|
||||
S(IPI_CPU_STOP, "CPU stop interrupts"),
|
||||
S(IPI_IRQ_WORK, "IRQ work interrupts"),
|
||||
S(IPI_COMPLETION, "completion interrupts"),
|
||||
};
|
||||
|
||||
void show_ipi_list(struct seq_file *p, int prec)
|
||||
|
@ -526,6 +528,19 @@ static void ipi_cpu_stop(unsigned int cpu)
|
|||
cpu_relax();
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(struct completion *, cpu_completion);
|
||||
|
||||
int register_ipi_completion(struct completion *completion, int cpu)
|
||||
{
|
||||
per_cpu(cpu_completion, cpu) = completion;
|
||||
return IPI_COMPLETION;
|
||||
}
|
||||
|
||||
static void ipi_complete(unsigned int cpu)
|
||||
{
|
||||
complete(per_cpu(cpu_completion, cpu));
|
||||
}
|
||||
|
||||
/*
|
||||
* Main handler for inter-processor interrupts
|
||||
*/
|
||||
|
@ -584,6 +599,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
|
|||
break;
|
||||
#endif
|
||||
|
||||
case IPI_COMPLETION:
|
||||
irq_enter();
|
||||
ipi_complete(cpu);
|
||||
irq_exit();
|
||||
break;
|
||||
|
||||
default:
|
||||
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
|
||||
cpu, ipinr);
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
*/
|
||||
unsigned int __init scu_get_core_count(void __iomem *scu_base)
|
||||
{
|
||||
unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG);
|
||||
unsigned int ncores = readl_relaxed(scu_base + SCU_CONFIG);
|
||||
return (ncores & 0x03) + 1;
|
||||
}
|
||||
|
||||
|
@ -42,19 +42,19 @@ void scu_enable(void __iomem *scu_base)
|
|||
#ifdef CONFIG_ARM_ERRATA_764369
|
||||
/* Cortex-A9 only */
|
||||
if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
|
||||
scu_ctrl = __raw_readl(scu_base + 0x30);
|
||||
scu_ctrl = readl_relaxed(scu_base + 0x30);
|
||||
if (!(scu_ctrl & 1))
|
||||
__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
|
||||
writel_relaxed(scu_ctrl | 0x1, scu_base + 0x30);
|
||||
}
|
||||
#endif
|
||||
|
||||
scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
|
||||
scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
|
||||
/* already enabled? */
|
||||
if (scu_ctrl & 1)
|
||||
return;
|
||||
|
||||
scu_ctrl |= 1;
|
||||
__raw_writel(scu_ctrl, scu_base + SCU_CTRL);
|
||||
writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);
|
||||
|
||||
/*
|
||||
* Ensure that the data accessed by CPU0 before the SCU was
|
||||
|
@ -80,9 +80,9 @@ int scu_power_mode(void __iomem *scu_base, unsigned int mode)
|
|||
if (mode > 3 || mode == 1 || cpu > 3)
|
||||
return -EINVAL;
|
||||
|
||||
val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
|
||||
val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
|
||||
val |= mode;
|
||||
__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
|
||||
writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -45,7 +45,7 @@ static void twd_set_mode(enum clock_event_mode mode,
|
|||
case CLOCK_EVT_MODE_PERIODIC:
|
||||
ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
|
||||
| TWD_TIMER_CONTROL_PERIODIC;
|
||||
__raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
|
||||
writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
|
||||
twd_base + TWD_TIMER_LOAD);
|
||||
break;
|
||||
case CLOCK_EVT_MODE_ONESHOT:
|
||||
|
@ -58,18 +58,18 @@ static void twd_set_mode(enum clock_event_mode mode,
|
|||
ctrl = 0;
|
||||
}
|
||||
|
||||
__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
|
||||
writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
|
||||
}
|
||||
|
||||
static int twd_set_next_event(unsigned long evt,
|
||||
struct clock_event_device *unused)
|
||||
{
|
||||
unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
|
||||
unsigned long ctrl = readl_relaxed(twd_base + TWD_TIMER_CONTROL);
|
||||
|
||||
ctrl |= TWD_TIMER_CONTROL_ENABLE;
|
||||
|
||||
__raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
|
||||
__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
|
||||
writel_relaxed(evt, twd_base + TWD_TIMER_COUNTER);
|
||||
writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -82,8 +82,8 @@ static int twd_set_next_event(unsigned long evt,
|
|||
*/
|
||||
static int twd_timer_ack(void)
|
||||
{
|
||||
if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
|
||||
__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
|
||||
if (readl_relaxed(twd_base + TWD_TIMER_INTSTAT)) {
|
||||
writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -211,15 +211,15 @@ static void twd_calibrate_rate(void)
|
|||
waitjiffies += 5;
|
||||
|
||||
/* enable, no interrupt or reload */
|
||||
__raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);
|
||||
writel_relaxed(0x1, twd_base + TWD_TIMER_CONTROL);
|
||||
|
||||
/* maximum value */
|
||||
__raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
|
||||
writel_relaxed(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
|
||||
|
||||
while (get_jiffies_64() < waitjiffies)
|
||||
udelay(10);
|
||||
|
||||
count = __raw_readl(twd_base + TWD_TIMER_COUNTER);
|
||||
count = readl_relaxed(twd_base + TWD_TIMER_COUNTER);
|
||||
|
||||
twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
|
||||
|
||||
|
@ -277,7 +277,7 @@ static void twd_timer_setup(void)
|
|||
* bother with the below.
|
||||
*/
|
||||
if (per_cpu(percpu_setup_called, cpu)) {
|
||||
__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
|
||||
writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
|
||||
clockevents_register_device(clk);
|
||||
enable_percpu_irq(clk->irq, 0);
|
||||
return;
|
||||
|
@ -290,7 +290,7 @@ static void twd_timer_setup(void)
|
|||
* The following is done once per CPU the first time .setup() is
|
||||
* called.
|
||||
*/
|
||||
__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
|
||||
writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
|
||||
|
||||
clk->name = "local_timer";
|
||||
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
#include <asm/suspend.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
|
||||
extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
|
||||
extern void cpu_resume_mmu(void);
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
|
@ -21,6 +21,7 @@ extern void cpu_resume_mmu(void);
|
|||
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
|
||||
{
|
||||
struct mm_struct *mm = current->active_mm;
|
||||
u32 __mpidr = cpu_logical_map(smp_processor_id());
|
||||
int ret;
|
||||
|
||||
if (!idmap_pgd)
|
||||
|
@ -32,7 +33,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
|
|||
* resume (indicated by a zero return code), we need to switch
|
||||
* back to the correct page tables.
|
||||
*/
|
||||
ret = __cpu_suspend(arg, fn);
|
||||
ret = __cpu_suspend(arg, fn, __mpidr);
|
||||
if (ret == 0) {
|
||||
cpu_switch_mm(mm->pgd, mm);
|
||||
local_flush_bp_all();
|
||||
|
@ -44,7 +45,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
|
|||
#else
|
||||
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
|
||||
{
|
||||
return __cpu_suspend(arg, fn);
|
||||
u32 __mpidr = cpu_logical_map(smp_processor_id());
|
||||
return __cpu_suspend(arg, fn, __mpidr);
|
||||
}
|
||||
#define idmap_pgd NULL
|
||||
#endif
|
||||
|
|
|
@ -34,6 +34,7 @@
|
|||
#include <asm/unwind.h>
|
||||
#include <asm/tls.h>
|
||||
#include <asm/system_misc.h>
|
||||
#include <asm/opcodes.h>
|
||||
|
||||
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
|
||||
|
||||
|
@ -341,15 +342,17 @@ void arm_notify_die(const char *str, struct pt_regs *regs,
|
|||
int is_valid_bugaddr(unsigned long pc)
|
||||
{
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
unsigned short bkpt;
|
||||
u16 bkpt;
|
||||
u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
|
||||
#else
|
||||
unsigned long bkpt;
|
||||
u32 bkpt;
|
||||
u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
|
||||
#endif
|
||||
|
||||
if (probe_kernel_address((unsigned *)pc, bkpt))
|
||||
return 0;
|
||||
|
||||
return bkpt == BUG_INSTR_VALUE;
|
||||
return bkpt == insn;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -402,25 +405,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
|
|||
if (processor_mode(regs) == SVC_MODE) {
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
if (thumb_mode(regs)) {
|
||||
instr = ((u16 *)pc)[0];
|
||||
instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
|
||||
if (is_wide_instruction(instr)) {
|
||||
instr <<= 16;
|
||||
instr |= ((u16 *)pc)[1];
|
||||
u16 inst2;
|
||||
inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
|
||||
instr = __opcode_thumb32_compose(instr, inst2);
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
instr = *(u32 *) pc;
|
||||
instr = __mem_to_opcode_arm(*(u32 *) pc);
|
||||
} else if (thumb_mode(regs)) {
|
||||
if (get_user(instr, (u16 __user *)pc))
|
||||
goto die_sig;
|
||||
instr = __mem_to_opcode_thumb16(instr);
|
||||
if (is_wide_instruction(instr)) {
|
||||
unsigned int instr2;
|
||||
if (get_user(instr2, (u16 __user *)pc+1))
|
||||
goto die_sig;
|
||||
instr <<= 16;
|
||||
instr |= instr2;
|
||||
instr2 = __mem_to_opcode_thumb16(instr2);
|
||||
instr = __opcode_thumb32_compose(instr, instr2);
|
||||
}
|
||||
} else if (get_user(instr, (u32 __user *)pc)) {
|
||||
instr = __mem_to_opcode_arm(instr);
|
||||
goto die_sig;
|
||||
}
|
||||
|
||||
|
|
|
@ -10,6 +10,11 @@ UNWIND( .fnstart )
|
|||
and r3, r0, #31 @ Get bit offset
|
||||
mov r0, r0, lsr #5
|
||||
add r1, r1, r0, lsl #2 @ Get word offset
|
||||
#if __LINUX_ARM_ARCH__ >= 7
|
||||
.arch_extension mp
|
||||
ALT_SMP(W(pldw) [r1])
|
||||
ALT_UP(W(nop))
|
||||
#endif
|
||||
mov r3, r2, lsl r3
|
||||
1: ldrex r2, [r1]
|
||||
\instr r2, r2, r3
|
||||
|
|
|
@ -4,6 +4,7 @@ config ARCH_HIGHBANK
|
|||
select ARCH_HAS_CPUFREQ
|
||||
select ARCH_HAS_HOLES_MEMORYMODEL
|
||||
select ARCH_HAS_OPP
|
||||
select ARCH_SUPPORTS_BIG_ENDIAN
|
||||
select ARCH_WANT_OPTIONAL_GPIOLIB
|
||||
select ARM_AMBA
|
||||
select ARM_ERRATA_764369
|
||||
|
|
|
@ -1,9 +1,5 @@
|
|||
if ARCH_IXP4XX
|
||||
|
||||
config ARCH_SUPPORTS_BIG_ENDIAN
|
||||
bool
|
||||
default y
|
||||
|
||||
menu "Intel IXP4xx Implementation Options"
|
||||
|
||||
comment "IXP4xx Platforms"
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
config ARCH_MVEBU
|
||||
bool "Marvell SOCs with Device Tree support" if ARCH_MULTI_V7
|
||||
select ARCH_SUPPORTS_BIG_ENDIAN
|
||||
select CLKSRC_MMIO
|
||||
select COMMON_CLK
|
||||
select GENERIC_CLOCKEVENTS
|
||||
|
|
|
@ -20,6 +20,8 @@
|
|||
#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
|
||||
#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4
|
||||
|
||||
#include <asm/assembler.h>
|
||||
|
||||
.text
|
||||
/*
|
||||
* r0: Coherency fabric base register address
|
||||
|
@ -29,6 +31,7 @@ ENTRY(ll_set_cpu_coherent)
|
|||
/* Create bit by cpu index */
|
||||
mov r3, #(1 << 24)
|
||||
lsl r1, r3, r1
|
||||
ARM_BE8(rev r1, r1)
|
||||
|
||||
/* Add CPU to SMP group - Atomic */
|
||||
add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET
|
||||
|
|
|
@ -21,12 +21,16 @@
|
|||
#include <linux/linkage.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/assembler.h>
|
||||
|
||||
/*
|
||||
* Armada XP specific entry point for secondary CPUs.
|
||||
* We add the CPU to the coherency fabric and then jump to secondary
|
||||
* startup
|
||||
*/
|
||||
ENTRY(armada_xp_secondary_startup)
|
||||
ARM_BE8(setend be ) @ go BE8 if entered LE
|
||||
|
||||
/* Get coherency fabric base physical address */
|
||||
adr r0, 1f
|
||||
ldr r1, [r0]
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
config ARCH_VEXPRESS
|
||||
bool "ARM Ltd. Versatile Express family" if ARCH_MULTI_V7
|
||||
select ARCH_REQUIRE_GPIOLIB
|
||||
select ARCH_SUPPORTS_BIG_ENDIAN
|
||||
select ARM_AMBA
|
||||
select ARM_GIC
|
||||
select ARM_TIMER_SP804
|
||||
|
|
|
@@ -952,3 +952,9 @@ config ARCH_HAS_BARRIERS
	help
	  This option allows the use of custom mandatory barriers
	  included via the mach/barriers.h file.

config ARCH_SUPPORTS_BIG_ENDIAN
	bool
	help
	  This option specifies the architecture can support big endian
	  operation.

||||
|
|
|
@ -38,9 +38,8 @@ ENTRY(v6_early_abort)
|
|||
bne do_DataAbort
|
||||
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
|
||||
ldr r3, [r4] @ read aborted ARM instruction
|
||||
#ifdef CONFIG_CPU_ENDIAN_BE8
|
||||
rev r3, r3
|
||||
#endif
|
||||
ARM_BE8(rev r3, r3)
|
||||
|
||||
do_ldrd_abort tmp=ip, insn=r3
|
||||
tst r3, #1 << 20 @ L = 0 -> write
|
||||
orreq r1, r1, #1 << 11 @ yes.
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include <asm/cp15.h>
|
||||
#include <asm/system_info.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <asm/opcodes.h>
|
||||
|
||||
#include "fault.h"
|
||||
|
||||
|
@ -762,21 +763,25 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
|
|||
if (thumb_mode(regs)) {
|
||||
u16 *ptr = (u16 *)(instrptr & ~1);
|
||||
fault = probe_kernel_address(ptr, tinstr);
|
||||
tinstr = __mem_to_opcode_thumb16(tinstr);
|
||||
if (!fault) {
|
||||
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
|
||||
IS_T32(tinstr)) {
|
||||
/* Thumb-2 32-bit */
|
||||
u16 tinst2 = 0;
|
||||
fault = probe_kernel_address(ptr + 1, tinst2);
|
||||
instr = (tinstr << 16) | tinst2;
|
||||
tinst2 = __mem_to_opcode_thumb16(tinst2);
|
||||
instr = __opcode_thumb32_compose(tinstr, tinst2);
|
||||
thumb2_32b = 1;
|
||||
} else {
|
||||
isize = 2;
|
||||
instr = thumb2arm(tinstr);
|
||||
}
|
||||
}
|
||||
} else
|
||||
} else {
|
||||
fault = probe_kernel_address(instrptr, instr);
|
||||
instr = __mem_to_opcode_arm(instr);
|
||||
}
|
||||
|
||||
if (fault) {
|
||||
type = TYPE_FAULT;
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <asm/system_info.h>
|
||||
|
||||
pgd_t *idmap_pgd;
|
||||
phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
|
||||
|
||||
#ifdef CONFIG_ARM_LPAE
|
||||
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
|
||||
|
@ -67,8 +68,9 @@ static void identity_mapping_add(pgd_t *pgd, const char *text_start,
|
|||
unsigned long addr, end;
|
||||
unsigned long next;
|
||||
|
||||
addr = virt_to_phys(text_start);
|
||||
end = virt_to_phys(text_end);
|
||||
addr = virt_to_idmap(text_start);
|
||||
end = virt_to_idmap(text_end);
|
||||
pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);
|
||||
|
||||
prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
|
||||
|
||||
|
@ -90,8 +92,6 @@ static int __init init_static_idmap(void)
|
|||
if (!idmap_pgd)
|
||||
return -ENOMEM;
|
||||
|
||||
pr_info("Setting up static identity map for 0x%p - 0x%p\n",
|
||||
__idmap_text_start, __idmap_text_end);
|
||||
identity_mapping_add(idmap_pgd, __idmap_text_start,
|
||||
__idmap_text_end, 0);
|
||||
|
||||
|
|
|
@ -28,6 +28,8 @@
|
|||
#include <asm/highmem.h>
|
||||
#include <asm/system_info.h>
|
||||
#include <asm/traps.h>
|
||||
#include <asm/procinfo.h>
|
||||
#include <asm/memory.h>
|
||||
|
||||
#include <asm/mach/arch.h>
|
||||
#include <asm/mach/map.h>
|
||||
|
@ -1315,6 +1317,86 @@ static void __init map_lowmem(void)
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM_LPAE
|
||||
/*
|
||||
* early_paging_init() recreates boot time page table setup, allowing machines
|
||||
* to switch over to a high (>4G) address space on LPAE systems
|
||||
*/
|
||||
void __init early_paging_init(const struct machine_desc *mdesc,
|
||||
struct proc_info_list *procinfo)
|
||||
{
|
||||
pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
|
||||
unsigned long map_start, map_end;
|
||||
pgd_t *pgd0, *pgdk;
|
||||
pud_t *pud0, *pudk, *pud_start;
|
||||
pmd_t *pmd0, *pmdk;
|
||||
phys_addr_t phys;
|
||||
int i;
|
||||
|
||||
if (!(mdesc->init_meminfo))
|
||||
return;
|
||||
|
||||
/* remap kernel code and data */
|
||||
map_start = init_mm.start_code;
|
||||
map_end = init_mm.brk;
|
||||
|
||||
/* get a handle on things... */
|
||||
pgd0 = pgd_offset_k(0);
|
||||
pud_start = pud0 = pud_offset(pgd0, 0);
|
||||
pmd0 = pmd_offset(pud0, 0);
|
||||
|
||||
pgdk = pgd_offset_k(map_start);
|
||||
pudk = pud_offset(pgdk, map_start);
|
||||
pmdk = pmd_offset(pudk, map_start);
|
||||
|
||||
mdesc->init_meminfo();
|
||||
|
||||
/* Run the patch stub to update the constants */
|
||||
fixup_pv_table(&__pv_table_begin,
|
||||
(&__pv_table_end - &__pv_table_begin) << 2);
|
||||
|
||||
/*
|
||||
* Cache cleaning operations for self-modifying code
|
||||
* We should clean the entries by MVA but running a
|
||||
* for loop over every pv_table entry pointer would
|
||||
* just complicate the code.
|
||||
*/
|
||||
flush_cache_louis();
|
||||
dsb();
|
||||
isb();
|
||||
|
||||
/* remap level 1 table */
|
||||
for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
|
||||
set_pud(pud0,
|
||||
__pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
|
||||
pmd0 += PTRS_PER_PMD;
|
||||
}
|
||||
|
||||
/* remap pmds for kernel mapping */
|
||||
phys = __pa(map_start) & PMD_MASK;
|
||||
do {
|
||||
*pmdk++ = __pmd(phys | pmdprot);
|
||||
phys += PMD_SIZE;
|
||||
} while (phys < map_end);
|
||||
|
||||
flush_cache_all();
|
||||
cpu_switch_mm(pgd0, &init_mm);
|
||||
cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
|
||||
local_flush_bp_all();
|
||||
local_flush_tlb_all();
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
void __init early_paging_init(const struct machine_desc *mdesc,
|
||||
struct proc_info_list *procinfo)
|
||||
{
|
||||
if (mdesc->init_meminfo)
|
||||
mdesc->init_meminfo();
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* paging_init() sets up the page tables, initialises the zone memory
|
||||
* maps, and sets up the zero page, bad page and bad page tables.
|
||||
|
|
|
@ -295,6 +295,15 @@ void __init sanity_check_meminfo(void)
|
|||
high_memory = __va(end - 1) + 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* early_paging_init() recreates boot time page table setup, allowing machines
|
||||
* to switch over to a high (>4G) address space on LPAE systems
|
||||
*/
|
||||
void __init early_paging_init(const struct machine_desc *mdesc,
|
||||
struct proc_info_list *procinfo)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* paging_init() sets up the page tables, initialises the zone memory
|
||||
* maps, and sets up the zero page, bad page and bad page tables.
|
||||
|
|
|
@ -220,9 +220,7 @@ __v6_setup:
|
|||
#endif /* CONFIG_MMU */
|
||||
adr r5, v6_crval
|
||||
ldmia r5, {r5, r6}
|
||||
#ifdef CONFIG_CPU_ENDIAN_BE8
|
||||
orr r6, r6, #1 << 25 @ big-endian page tables
|
||||
#endif
|
||||
ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
|
||||
mrc p15, 0, r0, c1, c0, 0 @ read control register
|
||||
bic r0, r0, r5 @ clear bits them
|
||||
orr r0, r0, r6 @ set them
|
||||
|
|
|
@ -367,9 +367,7 @@ __v7_setup:
|
|||
#endif
|
||||
adr r5, v7_crval
|
||||
ldmia r5, {r5, r6}
|
||||
#ifdef CONFIG_CPU_ENDIAN_BE8
|
||||
orr r6, r6, #1 << 25 @ big-endian page tables
|
||||
#endif
|
||||
ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
|
||||
#ifdef CONFIG_SWP_EMULATE
|
||||
orr r5, r5, #(1 << 10) @ set SW bit in "clear"
|
||||
bic r6, r6, #(1 << 10) @ clear it in "mmuset"
|
||||
|
|
|
@@ -19,6 +19,7 @@
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>

#include "bpf_jit_32.h"

@@ -113,8 +114,11 @@ static u32 jit_udiv(u32 dividend, u32 divisor)

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst | (cond << 28);
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}
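The `inst = __opcode_to_mem_arm(inst)` line is the substance of this hunk: a BE8 kernel makes big-endian data accesses while instructions are still fetched little-endian, so an opcode assembled in a register must be byte-swapped before the JIT stores it through a data pointer. A standalone sketch of that idea (my own model for illustration, not the kernel's actual <asm/opcodes.h> macro; CONFIG_CPU_ENDIAN_BE8 is defined here only so the example exercises the BE8 path):

    /* Sketch only: models what an opcode-to-memory conversion has to do on
     * BE8; the real helper is __opcode_to_mem_arm() in <asm/opcodes.h>. */
    #include <stdint.h>

    #define CONFIG_CPU_ENDIAN_BE8 1

    static inline uint32_t opcode_to_mem_arm(uint32_t insn)
    {
    #ifdef CONFIG_CPU_ENDIAN_BE8
        return __builtin_bswap32(insn);   /* data view is big-endian */
    #else
        return insn;                      /* LE kernels store as-is */
    #endif
    }

On a little-endian build the conversion collapses to a no-op, which is why the rest of the _emit() path needs no other change.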
@@ -10,6 +10,7 @@
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>

/*
 * Realview/Versatile Express specific entry point for secondary CPUs.
@@ -17,6 +18,7 @@
 * until we're ready for them to initialise.
 */
ENTRY(versatile_secondary_startup)
	ARM_BE8(setend	be)
	mrc	p15, 0, r0, c0, c0, 5
	bic	r0, #0xff000000
	adr	r4, 1f
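The only functional change here is the ARM_BE8(setend be) at the top of the secondary entry point: the wrapper emits its argument only on CONFIG_CPU_ENDIAN_BE8 builds, so the incoming CPU switches to big-endian data accesses before touching memory, and the line disappears entirely on little-endian kernels. The assumed shape of the wrapper (the real definition lives in arch/arm/include/asm/assembler.h):

    /* Assumed shape of the ARM_BE8() helper used above. */
    #ifdef CONFIG_CPU_ENDIAN_BE8
    #define ARM_BE8(code...)    code    /* BE8 kernel: emit the instruction */
    #else
    #define ARM_BE8(code...)            /* LE kernel: drop it entirely */
    #endif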
@@ -776,6 +776,22 @@ config CRYPTO_AES_ARM

	  See <http://csrc.nist.gov/encryption/aes/> for more information.

config CRYPTO_AES_ARM_BS
	tristate "Bit sliced AES using NEON instructions"
	depends on ARM && KERNEL_MODE_NEON
	select CRYPTO_ALGAPI
	select CRYPTO_AES_ARM
	select CRYPTO_ABLK_HELPER
	help
	  Use a faster and more secure NEON based implementation of AES in CBC,
	  CTR and XTS modes

	  Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
	  and for XTS mode encryption, CBC and XTS mode decryption speedup is
	  around 25%. (CBC encryption speed is not affected by this driver.)
	  This implementation does not rely on any lookup tables so it is
	  believed to be invulnerable to cache timing attacks.

config CRYPTO_ANUBIS
	tristate "Anubis cipher algorithm"
	select CRYPTO_ALGAPI
@@ -280,7 +280,7 @@ asmlinkage void __naked cci_enable_port_for_self(void)

	/* Enable the CCI port */
"	ldr	r0, [r0, %[offsetof_port_phys]] \n"
"	mov	r3, #"__stringify(CCI_ENABLE_REQ)" \n"
"	mov	r3, %[cci_enable_req]\n"
"	str	r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"

	/* poll the status reg for completion */
@@ -288,7 +288,7 @@ asmlinkage void __naked cci_enable_port_for_self(void)
"	ldr	r0, [r1] \n"
"	ldr	r0, [r0, r1]	@ cci_ctrl_base \n"
"4:	ldr	r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
"	tst	r1, #1 \n"
"	tst	r1, %[cci_control_status_bits] \n"
"	bne	4b \n"

"	mov	r0, #0 \n"
@@ -301,6 +301,8 @@ asmlinkage void __naked cci_enable_port_for_self(void)
"7:	.word	cci_ctrl_phys - . \n"
	: :
	[sizeof_cpu_port] "i" (sizeof(cpu_port)),
	[cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
	[cci_control_status_bits] "i" cpu_to_le32(1),
#ifndef __ARMEB__
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
#else
@@ -253,10 +253,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);
	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;

	raw_spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

@@ -652,7 +651,9 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;
	unsigned long flags, map = 0;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)

@@ -666,9 +667,151 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif

#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}

/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
 * is also updated. Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	raw_spin_lock(&irq_controller_lock);

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
		}
	}

	raw_spin_unlock(&irq_controller_lock);

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
					       dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}

/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;
	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{

@@ -850,6 +993,8 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
		percpu_offset = 0;

	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
	if (!gic_cnt)
		gic_init_physaddr(node);

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
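gic_migrate_target() relies on each GIC_DIST_TARGET word packing four one-byte target fields, so rotating the active-target mask by (cur_cpu_id - new_cpu_id) & 31 bit positions moves the per-interrupt bit from the old CPU interface to the new one in all four bytes at once. A standalone sketch of that single step on one register word (the values are invented and ror32() is reimplemented so the example builds on its own; it is an illustration, not driver code):

    /* Illustration of the ror32() retargeting trick in gic_migrate_target(). */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ror32(uint32_t word, unsigned int shift)
    {
        shift &= 31;
        return shift ? (word >> shift) | (word << (32 - shift)) : word;
    }

    int main(void)
    {
        unsigned int cur_id = 0, new_id = 2;      /* example interface IDs   */
        uint32_t val = 0x01030101;                /* 4 IRQs' target bytes    */
        uint32_t cur_mask = 0x01010101u << cur_id;
        uint32_t active = val & cur_mask;         /* IRQs aimed at cur_id    */
        unsigned int ror_val = (cur_id - new_id) & 31;

        val &= ~active;                           /* drop the old target bit */
        val |= ror32(active, ror_val);            /* set the bit for new_id  */
        printf("retargeted word: 0x%08" PRIx32 "\n", val);  /* 0x04060404    */
        return 0;
    }

The interrupt that also targeted interface 1 (the 0x03 byte) keeps that bit and only has its interface-0 bit moved to interface 2, which matches the "targets to other CPU interfaces are unchanged" rule in the function's comment.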
@@ -31,6 +31,8 @@
#define GIC_DIST_TARGET			0x800
#define GIC_DIST_CONFIG			0xc00
#define GIC_DIST_SOFTINT		0xf00
#define GIC_DIST_SGI_PENDING_CLEAR	0xf10
#define GIC_DIST_SGI_PENDING_SET	0xf20

#define GICH_HCR			0x0
#define GICH_VTR			0x4

@@ -74,6 +76,11 @@ static inline void gic_init(unsigned int nr, int start,
	gic_init_bases(nr, start, dist, cpu, 0, NULL);
}

void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
int gic_get_cpu_id(unsigned int cpu);
void gic_migrate_target(unsigned int new_cpu_id);
unsigned long gic_get_sgir_physaddr(void);

#endif /* __ASSEMBLY */

#endif
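These four prototypes are what big.LITTLE switcher code is expected to call into. A purely hypothetical caller sketch (the function and variable names below are invented for illustration; only the gic_* helpers come from this header):

    /* Hypothetical switcher-side helper: resolve the inbound CPU's GIC
     * interface and poke it with an SGI. */
    #include <linux/errno.h>
    #include <linux/irqchip/arm-gic.h>

    static int poke_inbound_cpu(unsigned int inbound_cpu, unsigned int ipi)
    {
        int intf = gic_get_cpu_id(inbound_cpu);

        if (intf < 0)
            return -EINVAL;   /* unknown or ambiguous interface mapping */
        gic_send_sgi(intf, ipi);
        return 0;
    }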
@@ -0,0 +1,67 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM power

#if !defined(_TRACE_POWER_CPU_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_POWER_CPU_MIGRATE_H

#include <linux/tracepoint.h>

#define __cpu_migrate_proto			\
	TP_PROTO(u64 timestamp,			\
		 u32 cpu_hwid)
#define __cpu_migrate_args			\
	TP_ARGS(timestamp,			\
		cpu_hwid)

DECLARE_EVENT_CLASS(cpu_migrate,

	__cpu_migrate_proto,
	__cpu_migrate_args,

	TP_STRUCT__entry(
		__field(u64, timestamp)
		__field(u32, cpu_hwid)
	),

	TP_fast_assign(
		__entry->timestamp = timestamp;
		__entry->cpu_hwid = cpu_hwid;
	),

	TP_printk("timestamp=%llu cpu_hwid=0x%08lX",
		(unsigned long long)__entry->timestamp,
		(unsigned long)__entry->cpu_hwid
	)
);

#define __define_cpu_migrate_event(name)		\
	DEFINE_EVENT(cpu_migrate, cpu_migrate_##name,	\
		__cpu_migrate_proto,			\
		__cpu_migrate_args			\
	)

__define_cpu_migrate_event(begin);
__define_cpu_migrate_event(finish);
__define_cpu_migrate_event(current);

#undef __define_cpu_migrate
#undef __cpu_migrate_proto
#undef __cpu_migrate_args

/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */
#ifndef _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
#define _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING

/*
 * Set from_phys_cpu and to_phys_cpu to CPU_MIGRATE_ALL_CPUS to indicate
 * a whole-cluster migration:
 */
#define CPU_MIGRATE_ALL_CPUS 0x80000000U
#endif

#endif /* _TRACE_POWER_CPU_MIGRATE_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE power_cpu_migrate
#include <trace/define_trace.h>
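The header only declares the cpu_migrate_begin/finish/current events; something in the switcher still has to fire them. A hedged kernel-context sketch of how a caller might do that (the surrounding function is invented, only the trace_* calls follow from the definitions above; exactly one .c file defines CREATE_TRACE_POINTS before including the header):

    /* Illustrative only: bracket a cluster switch with the new events. */
    #define CREATE_TRACE_POINTS
    #include <trace/events/power_cpu_migrate.h>

    static void report_switch(u64 now_ns, u32 dest_hwid)
    {
        trace_cpu_migrate_begin(now_ns, dest_hwid);
        /* ... hand execution over to the inbound CPU here ... */
        trace_cpu_migrate_finish(now_ns, dest_hwid);
    }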
@@ -2,3 +2,6 @@ ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
endif
ifndef NO_LIBUNWIND
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o
endif
@@ -0,0 +1,54 @@
#ifndef ARCH_PERF_REGS_H
#define ARCH_PERF_REGS_H

#include <stdlib.h>
#include "../../util/types.h"
#include <asm/perf_regs.h>

#define PERF_REGS_MASK	((1ULL << PERF_REG_ARM_MAX) - 1)
#define PERF_REG_IP	PERF_REG_ARM_PC
#define PERF_REG_SP	PERF_REG_ARM_SP

static inline const char *perf_reg_name(int id)
{
	switch (id) {
	case PERF_REG_ARM_R0:
		return "r0";
	case PERF_REG_ARM_R1:
		return "r1";
	case PERF_REG_ARM_R2:
		return "r2";
	case PERF_REG_ARM_R3:
		return "r3";
	case PERF_REG_ARM_R4:
		return "r4";
	case PERF_REG_ARM_R5:
		return "r5";
	case PERF_REG_ARM_R6:
		return "r6";
	case PERF_REG_ARM_R7:
		return "r7";
	case PERF_REG_ARM_R8:
		return "r8";
	case PERF_REG_ARM_R9:
		return "r9";
	case PERF_REG_ARM_R10:
		return "r10";
	case PERF_REG_ARM_FP:
		return "fp";
	case PERF_REG_ARM_IP:
		return "ip";
	case PERF_REG_ARM_SP:
		return "sp";
	case PERF_REG_ARM_LR:
		return "lr";
	case PERF_REG_ARM_PC:
		return "pc";
	default:
		return NULL;
	}

	return NULL;
}

#endif /* ARCH_PERF_REGS_H */
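perf_reg_name() is meant to be driven by whatever register bit mask came back in a sample. A purely hypothetical illustration of that pattern (none of this is perf's real sample-parsing code; the demo name callback only covers three registers and the sample values are made up):

    /* Hypothetical: walk a sampled register mask and label each value. */
    #include <stdio.h>
    #include <stdint.h>

    static const char *demo_reg_name(int id)
    {
        return id == 0 ? "r0" : id == 13 ? "sp" : id == 15 ? "pc" : NULL;
    }

    static void print_sampled_regs(uint64_t mask, const uint64_t *vals)
    {
        int id, n = 0;

        for (id = 0; id < 64; id++) {
            if (!(mask & (1ULL << id)))
                continue;
            printf("%-3s 0x%016llx\n",
                   demo_reg_name(id) ? demo_reg_name(id) : "?",
                   (unsigned long long)vals[n++]);
        }
    }

    int main(void)
    {
        uint64_t vals[] = { 0x1234, 0xbefff000, 0x8040 };

        /* r0, sp and pc present in this fake sample */
        print_sampled_regs((1ULL << 0) | (1ULL << 13) | (1ULL << 15), vals);
        return 0;
    }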
@@ -0,0 +1,48 @@

#include <errno.h>
#include <libunwind.h>
#include "perf_regs.h"
#include "../../util/unwind.h"

int unwind__arch_reg_id(int regnum)
{
	switch (regnum) {
	case UNW_ARM_R0:
		return PERF_REG_ARM_R0;
	case UNW_ARM_R1:
		return PERF_REG_ARM_R1;
	case UNW_ARM_R2:
		return PERF_REG_ARM_R2;
	case UNW_ARM_R3:
		return PERF_REG_ARM_R3;
	case UNW_ARM_R4:
		return PERF_REG_ARM_R4;
	case UNW_ARM_R5:
		return PERF_REG_ARM_R5;
	case UNW_ARM_R6:
		return PERF_REG_ARM_R6;
	case UNW_ARM_R7:
		return PERF_REG_ARM_R7;
	case UNW_ARM_R8:
		return PERF_REG_ARM_R8;
	case UNW_ARM_R9:
		return PERF_REG_ARM_R9;
	case UNW_ARM_R10:
		return PERF_REG_ARM_R10;
	case UNW_ARM_R11:
		return PERF_REG_ARM_FP;
	case UNW_ARM_R12:
		return PERF_REG_ARM_IP;
	case UNW_ARM_R13:
		return PERF_REG_ARM_SP;
	case UNW_ARM_R14:
		return PERF_REG_ARM_LR;
	case UNW_ARM_R15:
		return PERF_REG_ARM_PC;
	default:
		pr_err("unwind: invalid reg id %d\n", regnum);
		return -EINVAL;
	}

	return -EINVAL;
}
@@ -29,6 +29,10 @@ ifeq ($(ARCH),x86_64)
  NO_PERF_REGS := 0
  LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
endif
ifeq ($(ARCH),arm)
  NO_PERF_REGS := 0
  LIBUNWIND_LIBS = -lunwind -lunwind-arm
endif

ifeq ($(NO_PERF_REGS),0)
  CFLAGS += -DHAVE_PERF_REGS

@@ -208,8 +212,7 @@ ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
endif # try-cc
endif # NO_LIBELF

# There's only x86 (both 32 and 64) support for CFI unwind so far
ifneq ($(ARCH),x86)
ifeq ($(LIBUNWIND_LIBS),)
  NO_LIBUNWIND := 1
endif

@@ -223,9 +226,13 @@ endif

FLAGS_UNWIND=$(LIBUNWIND_CFLAGS) $(CFLAGS) $(LIBUNWIND_LDFLAGS) $(LDFLAGS) $(EXTLIBS) $(LIBUNWIND_LIBS)
ifneq ($(call try-cc,$(SOURCE_LIBUNWIND),$(FLAGS_UNWIND),libunwind),y)
  msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 0.99);
  msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 1.1);
  NO_LIBUNWIND := 1
endif # Libunwind support
ifneq ($(call try-cc,$(SOURCE_LIBUNWIND_DEBUG_FRAME),$(FLAGS_UNWIND),libunwind debug_frame),y)
  msg := $(warning No debug_frame support found in libunwind);
  CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
endif # debug_frame support in libunwind
endif # NO_LIBUNWIND

ifndef NO_LIBUNWIND
@@ -185,7 +185,6 @@ extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
				       unw_proc_info_t *pi,
				       int need_unwind_info, void *arg);

#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)

int main(void)

@@ -197,6 +196,26 @@ int main(void)
	return 0;
}
endef

define SOURCE_LIBUNWIND_DEBUG_FRAME
#include <libunwind.h>
#include <stdlib.h>

extern int
UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
				 unw_word_t ip, unw_word_t segbase,
				 const char *obj_name, unw_word_t start,
				 unw_word_t end);

#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)

int main(void)
{
	dwarf_find_debug_frame(0, NULL, 0, 0, NULL, 0, 0);
	return 0;
}
endef

endif

ifndef NO_BACKTRACE
@@ -39,6 +39,15 @@ UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,

#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)

extern int
UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
				 unw_word_t ip,
				 unw_word_t segbase,
				 const char *obj_name, unw_word_t start,
				 unw_word_t end);

#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)

#define DW_EH_PE_FORMAT_MASK	0x0f	/* format of the encoded value */
#define DW_EH_PE_APPL_MASK	0x70	/* how the value is to be applied */

@@ -245,8 +254,9 @@ static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
	return 0;
}

static int read_unwind_spec(struct dso *dso, struct machine *machine,
			    u64 *table_data, u64 *segbase, u64 *fde_count)
static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
				     u64 *table_data, u64 *segbase,
				     u64 *fde_count)
{
	int ret = -EINVAL, fd;
	u64 offset;

@@ -255,6 +265,7 @@ static int read_unwind_spec(struct dso *dso, struct machine *machine,
	if (fd < 0)
		return -EINVAL;

	/* Check the .eh_frame section for unwinding info */
	offset = elf_section_offset(fd, ".eh_frame_hdr");
	close(fd);

@@ -263,10 +274,29 @@ static int read_unwind_spec(struct dso *dso, struct machine *machine,
					table_data, segbase,
					fde_count);

	/* TODO .debug_frame check if eh_frame_hdr fails */
	return ret;
}

#ifndef NO_LIBUNWIND_DEBUG_FRAME
static int read_unwind_spec_debug_frame(struct dso *dso,
					struct machine *machine, u64 *offset)
{
	int fd = dso__data_fd(dso, machine);

	if (fd < 0)
		return -EINVAL;

	/* Check the .debug_frame section for unwinding info */
	*offset = elf_section_offset(fd, ".debug_frame");
	close(fd);

	if (*offset)
		return 0;

	return -EINVAL;
}
#endif

static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
{
	struct addr_location al;

@@ -291,20 +321,33 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,

	pr_debug("unwind: find_proc_info dso %s\n", map->dso->name);

	if (read_unwind_spec(map->dso, ui->machine,
			     &table_data, &segbase, &fde_count))
		return -EINVAL;
	/* Check the .eh_frame section for unwinding info */
	if (!read_unwind_spec_eh_frame(map->dso, ui->machine,
				       &table_data, &segbase, &fde_count)) {
		memset(&di, 0, sizeof(di));
		di.format = UNW_INFO_FORMAT_REMOTE_TABLE;
		di.start_ip = map->start;
		di.end_ip = map->end;
		di.u.rti.segbase = map->start + segbase;
		di.u.rti.table_data = map->start + table_data;
		di.u.rti.table_len = fde_count * sizeof(struct table_entry)
				     / sizeof(unw_word_t);
		return dwarf_search_unwind_table(as, ip, &di, pi,
						 need_unwind_info, arg);
	}

	memset(&di, 0, sizeof(di));
	di.format = UNW_INFO_FORMAT_REMOTE_TABLE;
	di.start_ip = map->start;
	di.end_ip = map->end;
	di.u.rti.segbase = map->start + segbase;
	di.u.rti.table_data = map->start + table_data;
	di.u.rti.table_len = fde_count * sizeof(struct table_entry)
			     / sizeof(unw_word_t);
	return dwarf_search_unwind_table(as, ip, &di, pi,
					 need_unwind_info, arg);

#ifndef NO_LIBUNWIND_DEBUG_FRAME
	/* Check the .debug_frame section for unwinding info */
	if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
		memset(&di, 0, sizeof(di));
		dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name,
				       map->start, map->end);
		return dwarf_search_unwind_table(as, ip, &di, pi,
						 need_unwind_info, arg);
	}
#endif

	return -EINVAL;
}

static int access_fpreg(unw_addr_space_t __maybe_unused as,
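The shape of the change in find_proc_info() is a two-step probe: use the .eh_frame_hdr remote table when the DSO has one, otherwise fall back to .debug_frame, and only when libunwind was built with dwarf_find_debug_frame(). A deliberately simplified, standalone sketch of that decision order (the probe functions below are stubs standing in for the ELF section lookups perf actually performs):

    /* Toy model of the eh_frame -> debug_frame fallback order; not perf code. */
    #include <stdio.h>

    static int have_eh_frame_hdr(void) { return 0; } /* pretend: missing */
    static int have_debug_frame(void)  { return 1; } /* pretend: present */

    static const char *pick_unwind_source(void)
    {
        if (have_eh_frame_hdr())
            return ".eh_frame_hdr";
    #ifndef NO_LIBUNWIND_DEBUG_FRAME
        if (have_debug_frame())
            return ".debug_frame";
    #endif
        return "none";
    }

    int main(void)
    {
        printf("unwind info source: %s\n", pick_unwind_source());
        return 0;
    }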