* Handling unaligned access in zero delay loops
 * spinlock livelock fix for SMP SystemC model
 * fixing 32bit overflow in access_ok
 * better setup of clockevents
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.12 (GNU/Linux)
 
 iQIcBAABAgAGBQJSSP42AAoJEGnX8d3iisJeH1wP/RJMY8sJBSipS0vg8YVO/0YJ
 gryBNNljppc8H+SFBrgzq//+YvO3KyEibNTm17ZfDijahCfEBzXpdjoXQCW3gOt9
 8MlWhvLTNHbcCwwCroKAMfXkXu1DIcwgbw7FeOWmYYkgK/GhU5TTFyQBtoUvMMDM
 G/nEik1rdKi9tzXMi333c43YGCxn7ezwQbNZk3vNzpFTa/CuKUobiW0ohjyiWeEw
 RdENsnwtxtCE9HuPqTaf0/YQnpP5EkoMZw6USHJ81/QseEgLbAst6Vh4E/g6EpLM
 nC/73f9VouVr3ozKxa0FgcNaF5/fJf8NaqrnbbBLuy6/kb0u0PSt061vF02gUTkA
 F40bqB3hD7mzvt8YjMHDMm70WT1+5q7I/qlKzxxjDtdEjLNGJt/JAMgpocV3CTww
 Br5x6Sq8GT1GEvM5kMm/+eUh/BZBvvLscKOmzCoI5+/PZZgttuv4Lzm29unFsgor
 1axQndW4vBh49OsltJTs/mI6mRpYL5IH8J7Umy4MhENzeqBUhQLtfZmDhl7RdxpA
 XHDtSXEotsaoVZCsHOncleTnenv4Yc0MqyYUhQdff+tXemHGIyBH012ohbrvKP28
 fJKDubTz3FzzH2CMZ4q2HyeNTQRNwG+3PpER+gKXYm38WXBgmKNzj602kaCeiadX
 aC7DgC+cCiD3F6mtQEak
 =wN+V
 -----END PGP SIGNATURE-----

Merge tag 'arc-fixes-for-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC Fixes from Vineet Gupta:
 - Handle unaligned access in zero delay loops
 - spinlock livelock fix for SMP SystemC model
 - fix 32bit overflow in access_ok
 - better setup of clockevents

* tag 'arc-fixes-for-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: Use clockevents_config_and_register over clockevents_register_device
  ARC: Workaround spinlock livelock in SMP SystemC simulation
  ARC: Fix 32-bit wrap around in access_ok()
  ARC: Handle zero-overhead-loop in unaligned access handler
Linus Torvalds 2013-09-30 10:37:05 -07:00
Parent 15c03dd485 55c2e26204
Commit 815a4bb18b
4 changed files with 18 additions and 8 deletions

View file

@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	__asm__ __volatile__(
+	"	ex  %0, [%1]	\n"
+	: "+r" (tmp)
+	: "r"(&(lock->slock))
+	: "memory");
+
 	smp_mb();
 }
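
The unlock above now goes through the ARC EX (atomic exchange) instruction instead of a plain store, matching how the lock side acquires the slock word. As an illustration only, here is a minimal user-space sketch of that exchange-based lock/unlock pattern, with the GCC __atomic builtins standing in for the ARC `ex` instruction; sketch_spinlock_t, sketch_spin_lock/unlock and the SLOCK_* values are invented for the example, and the acquire/release memory orders stand in for the kernel's explicit smp_mb().

/* Illustrative sketch only -- not the kernel implementation. */
#define SLOCK_UNLOCKED	0
#define SLOCK_LOCKED	1

typedef struct {
	unsigned int slock;
} sketch_spinlock_t;

static inline void sketch_spin_lock(sketch_spinlock_t *lock)
{
	/* Spin until the exchange reads back UNLOCKED, i.e. we took the lock. */
	while (__atomic_exchange_n(&lock->slock, SLOCK_LOCKED,
				   __ATOMIC_ACQUIRE) != SLOCK_UNLOCKED)
		;
}

static inline void sketch_spin_unlock(sketch_spinlock_t *lock)
{
	/*
	 * Release with an exchange as well, mirroring the hunk above: the
	 * stored value is UNLOCKED and the previous value is discarded.
	 */
	(void)__atomic_exchange_n(&lock->slock, SLOCK_UNLOCKED,
				  __ATOMIC_RELEASE);
}

int main(void)
{
	sketch_spinlock_t lock = { SLOCK_UNLOCKED };

	sketch_spin_lock(&lock);	/* takes the lock */
	sketch_spin_unlock(&lock);	/* releases it with an exchange */
	return 0;
}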

View file

@@ -43,7 +43,7 @@
  * Because it essentially checks if buffer end is within limit and @len is
  * non-negative, which implies that buffer start will be within limit too.
  *
- * The reason for rewriting being, for majorit yof cases, @len is generally
+ * The reason for rewriting being, for majority of cases, @len is generally
  * compile time constant, causing first sub-expression to be compile time
  * subsumed.
  *
@@ -53,7 +53,7 @@
  *
  */
 #define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
-				 (((addr)+(sz)) <= get_fs()))
+				 ((addr) <= (get_fs() - (sz))))
 #define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
 				 likely(__user_ok((addr), (sz))))
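
The new __user_ok() form sidesteps 32-bit wrap-around: once (sz) <= TASK_SIZE has been established, get_fs() - (sz) cannot underflow, whereas the old (addr)+(sz) sum could overflow and compare as a small value, letting a bogus range pass. A stand-alone demonstration with made-up numbers follows; LIMIT, user_ok_old() and user_ok_new() are simplified stand-ins for the kernel macros, not the real definitions.

#include <stdio.h>
#include <stdint.h>

#define LIMIT	0x60000000u			/* example user-space limit */

static int user_ok_old(uint32_t addr, uint32_t sz)
{
	return (sz <= LIMIT) && (addr + sz <= LIMIT);	/* addr + sz may wrap */
}

static int user_ok_new(uint32_t addr, uint32_t sz)
{
	return (sz <= LIMIT) && (addr <= LIMIT - sz);	/* no wrap: sz <= LIMIT */
}

int main(void)
{
	uint32_t addr = 0xfffffff0u;	/* clearly outside the user limit */
	uint32_t sz   = 0x20u;		/* addr + sz wraps around to 0x10 */

	printf("old check: %d (wrongly passes)\n", user_ok_old(addr, sz));
	printf("new check: %d (correctly fails)\n", user_ok_new(addr, sz));
	return 0;
}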

View file

@@ -227,12 +227,9 @@ void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
 {
 	struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
 
-	clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
-
-	clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
 	clk->cpumask = cpumask_of(cpu);
-
-	clockevents_register_device(clk);
+	clockevents_config_and_register(clk, arc_get_core_freq(),
+					0, ARC_TIMER_MAX);
 
 	/*
 	 * setup the per-cpu timer IRQ handler - for all cpus
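
The hunk above folds the open-coded mult/shift calculation, delta2ns conversion and device registration into a single clockevents_config_and_register() call, which takes the tick frequency plus the minimum and maximum programmable delta in ticks. A rough kernel-style sketch of the resulting setup pattern is below; example_timer_setup(), freq_hz and EXAMPLE_MAX_TICKS are illustrative names, and the snippet only builds inside a kernel tree.

#include <linux/clockchips.h>
#include <linux/cpumask.h>

#define EXAMPLE_MAX_TICKS	0xffffffffUL	/* illustrative upper bound */

static void example_timer_setup(struct clock_event_device *evt,
				u32 freq_hz, unsigned int cpu)
{
	evt->cpumask = cpumask_of(cpu);

	/*
	 * One call derives mult/shift and the ns delta bounds from freq_hz
	 * and the tick limits, then registers the device -- subsuming the
	 * clockevents_calc_mult_shift() / clockevent_delta2ns() /
	 * clockevents_register_device() sequence removed in the hunk above.
	 */
	clockevents_config_and_register(evt, freq_hz, 0, EXAMPLE_MAX_TICKS);
}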

View file

@@ -245,6 +245,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;
+
+		/* handle zero-overhead-loop */
+		if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
+			regs->ret = regs->lp_start;
+			regs->lp_count--;
+		}
 	}
 
 	return 0;
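
ARC zero-overhead loops branch from LP_END back to LP_START in hardware while LP_COUNT is non-zero, so when the unaligned-access handler emulates an instruction and bumps regs->ret by hand it must replicate that loop-back, which is what the added lines do. Below is a user-space sketch of the same fixup on a simplified register file; struct fake_regs, step_past_insn() and the addresses in main() are invented for the example.

#include <stdio.h>
#include <stdint.h>

struct fake_regs {
	uint32_t ret;		/* next PC after the emulated instruction */
	uint32_t lp_start;	/* first instruction of the hardware loop */
	uint32_t lp_end;	/* address just past the last loop instruction */
	uint32_t lp_count;	/* remaining iterations */
};

static void step_past_insn(struct fake_regs *r, uint32_t instr_len)
{
	r->ret += instr_len;

	/* handle zero-overhead-loop: branch back while iterations remain */
	if (r->ret == r->lp_end && r->lp_count) {
		r->ret = r->lp_start;
		r->lp_count--;
	}
}

int main(void)
{
	struct fake_regs r = {
		.ret = 0x1000, .lp_start = 0x0ff0, .lp_end = 0x1004, .lp_count = 3,
	};

	step_past_insn(&r, 4);	/* lands on lp_end -> wraps to lp_start */
	printf("ret=0x%x lp_count=%u\n", (unsigned)r.ret, (unsigned)r.lp_count);
	return 0;
}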