x86: fall back on interrupt disable in cmpxchg8b on 80386 and 80486
Actually, on 386, cmpxchg and cmpxchg_local fall back on cmpxchg_386_u8/16/32: they disable interrupts around a non-atomic update to mimic the cmpxchg behavior.

The comment already present in cmpxchg_386_u32:

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */

says much about why this cmpxchg implementation must not be used in an SMP context. However, cmpxchg_local can perfectly well use this fallback, since it only needs to be atomic with respect to the local CPU.

This patch adds a cmpxchg_486_u64 and uses it as a fallback for cmpxchg64 and cmpxchg64_local on 80386 and 80486.

Q: But why is it called cmpxchg_486 when the other fallbacks are called cmpxchg_386?

A: Because the standard cmpxchg is missing only on the 386, but cmpxchg8b is missing on both the 386 and the 486. Citing Intel's instruction set reference:

	cmpxchg: This instruction is not supported on Intel processors earlier
	than the Intel486 processors.

	cmpxchg8b: This instruction encoding is not supported on Intel
	processors earlier than the Pentium processors.

Q: What's the reason to have cmpxchg64_local on 32-bit architectures? Without that need, all this would just be a few simple defines.

A: cmpxchg64_local on 32-bit architectures takes unsigned long long parameters, whereas cmpxchg_local only takes longs. Since we have cmpxchg8b to execute an 8-byte cmpxchg atomically on Pentium and later, it makes sense to provide flavors of cmpxchg and cmpxchg_local using that instruction.

Also, for 32-bit architectures lacking a 64-bit atomic cmpxchg, it makes sense _not_ to define cmpxchg64 while cmpxchg can still be available.

Moreover, the fallback for cmpxchg8b on i386 for the 386 and 486 is an interrupt-disable emulation, unsuitable for SMP. However, cmpxchg64_local will be emulated by disabling interrupts on all architectures where it is not supported atomically.

Therefore, we *could* turn cmpxchg64_local into a cmpxchg_local, but it would make the 386/486 fallbacks ugly, make its design different from cmpxchg/cmpxchg64 (which really depend on atomic operations and cannot be emulated), and require __cmpxchg_local to be expressed as a macro rather than an inline function so its parameters would not be fixed to unsigned long long in every case.

So I think cmpxchg64_local makes sense there, but I am open to suggestions.

Q: Are there any callers?

A: I am actually using it in LTTng, in my timestamping code. I use it to work around CPUs with asynchronous TSCs: I need to update 64-bit values atomically on this 32-bit architecture.

Changelog:
- Ran through checkpatch.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent: 5f627f8e12
Commit: 2c0b8a7578
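For context, a minimal sketch of the kind of per-CPU 64-bit update described in the last Q/A above. This is not part of the patch: last_tsc, next_timestamp, and the monotonicity rule are hypothetical. cmpxchg64_local() is the right flavor here because each CPU only ever updates its own copy, so the operation only needs to be atomic with respect to code running on the local CPU:

	/*
	 * Hypothetical sketch (not from the patch): keep a per-CPU 64-bit
	 * timestamp monotonic on a 32-bit CPU.  cmpxchg64_local() suffices
	 * because each CPU only updates its own copy.  Caller is assumed to
	 * run with preemption disabled.
	 */
	static DEFINE_PER_CPU(u64, last_tsc);

	static u64 next_timestamp(u64 tsc)
	{
		u64 *last = &__get_cpu_var(last_tsc);
		u64 old, new;

		do {
			old = *last;
			new = (tsc > old) ? tsc : old + 1;	/* enforce monotonicity */
		} while (cmpxchg64_local(last, old, new) != old);

		return new;
	}

On Pentium and later this loop uses cmpxchg8b; on 386/486 it falls through to the interrupt-disable emulation added by this patch, which is fine for a local-CPU-only update.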
@@ -342,5 +342,22 @@ unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
 EXPORT_SYMBOL(cmpxchg_386_u32);
 #endif
 
+#ifndef CONFIG_X86_CMPXCHG64
+unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
+{
+	u64 prev;
+	unsigned long flags;
+
+	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
+	local_irq_save(flags);
+	prev = *(u64 *)ptr;
+	if (prev == old)
+		*(u64 *)ptr = new;
+	local_irq_restore(flags);
+	return prev;
+}
+EXPORT_SYMBOL(cmpxchg_486_u64);
+#endif
+
 // arch_initcall(intel_cpu_init);
 
@@ -105,15 +105,24 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
-#define sync_cmpxchg(ptr,o,n)\
-	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
-#define cmpxchg_local(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n)						\
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
+					(unsigned long)(n), sizeof(*(ptr))))
+#define sync_cmpxchg(ptr, o, n)						\
+	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
+					(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
+					(unsigned long)(n), sizeof(*(ptr))))
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+#define cmpxchg64(ptr, o, n)						\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
+					(unsigned long long)(n)))
+#define cmpxchg64_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\
+					(unsigned long long)(n)))
 #endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -203,57 +212,8 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 	return old;
 }
 
-#ifndef CONFIG_X86_CMPXCHG
-/*
- * Building a kernel capable running on 80386. It may be necessary to
- * simulate the cmpxchg on the 80386 CPU. For that purpose we define
- * a function for each of the sizes we support.
- */
-
-extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
-extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
-extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
-
-static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
-					unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-		return cmpxchg_386_u8(ptr, old, new);
-	case 2:
-		return cmpxchg_386_u16(ptr, old, new);
-	case 4:
-		return cmpxchg_386_u32(ptr, old, new);
-	}
-	return old;
-}
-
-#define cmpxchg(ptr,o,n)						\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	if (likely(boot_cpu_data.x86 > 3))				\
-		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
-				(unsigned long)(n), sizeof(*(ptr)));	\
-	else								\
-		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
-				(unsigned long)(n), sizeof(*(ptr)));	\
-	__ret;								\
-})
-#define cmpxchg_local(ptr,o,n)						\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	if (likely(boot_cpu_data.x86 > 3))				\
-		__ret = __cmpxchg_local((ptr), (unsigned long)(o),	\
-				(unsigned long)(n), sizeof(*(ptr)));	\
-	else								\
-		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
-				(unsigned long)(n), sizeof(*(ptr)));	\
-	__ret;								\
-})
-#endif
-
-static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
-				      unsigned long long new)
+static inline unsigned long long __cmpxchg64(volatile void *ptr,
+				unsigned long long old, unsigned long long new)
 {
 	unsigned long long prev;
 	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
@@ -280,10 +240,86 @@ static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
 	return prev;
 }
 
-#define cmpxchg64(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
-					(unsigned long long)(n)))
-#define cmpxchg64_local(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
-					(unsigned long long)(n)))
+#ifndef CONFIG_X86_CMPXCHG
+/*
+ * Building a kernel capable running on 80386. It may be necessary to
+ * simulate the cmpxchg on the 80386 CPU. For that purpose we define
+ * a function for each of the sizes we support.
+ */
+
+extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
+extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
+extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
+
+static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
+					unsigned long new, int size)
+{
+	switch (size) {
+	case 1:
+		return cmpxchg_386_u8(ptr, old, new);
+	case 2:
+		return cmpxchg_386_u16(ptr, old, new);
+	case 4:
+		return cmpxchg_386_u32(ptr, old, new);
+	}
+	return old;
+}
+
+#define cmpxchg(ptr, o, n)						\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	if (likely(boot_cpu_data.x86 > 3))				\
+		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
+				(unsigned long)(n), sizeof(*(ptr)));	\
+	else								\
+		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
+				(unsigned long)(n), sizeof(*(ptr)));	\
+	__ret;								\
+})
+#define cmpxchg_local(ptr, o, n)					\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	if (likely(boot_cpu_data.x86 > 3))				\
+		__ret = __cmpxchg_local((ptr), (unsigned long)(o),	\
+				(unsigned long)(n), sizeof(*(ptr)));	\
+	else								\
+		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
+				(unsigned long)(n), sizeof(*(ptr)));	\
+	__ret;								\
+})
+#endif
+
+#ifndef CONFIG_X86_CMPXCHG64
+/*
+ * Building a kernel capable running on 80386 and 80486. It may be necessary
+ * to simulate the cmpxchg8b on the 80386 and 80486 CPU.
+ */
+
+extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
+
+#define cmpxchg64(ptr, o, n)						\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	if (likely(boot_cpu_data.x86 > 4))				\
+		__ret = __cmpxchg64((ptr), (unsigned long long)(o),	\
+				(unsigned long long)(n));		\
+	else								\
+		__ret = cmpxchg_486_u64((ptr), (unsigned long long)(o),	\
+				(unsigned long long)(n));		\
+	__ret;								\
+})
+#define cmpxchg64_local(ptr, o, n)					\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	if (likely(boot_cpu_data.x86 > 4))				\
+		__ret = __cmpxchg64_local((ptr), (unsigned long long)(o), \
+				(unsigned long long)(n));		\
+	else								\
+		__ret = cmpxchg_486_u64((ptr), (unsigned long long)(o),	\
+				(unsigned long long)(n));		\
+	__ret;								\
+})
+
+#endif
+
 #endif
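
As an aside (not part of the patch), a hypothetical caller illustrates what the runtime dispatch above buys: the same source builds and runs on everything from a 386 up. The names seq64 and inc_seq64 are made up for illustration.

	/*
	 * Hypothetical caller: atomically increment a 64-bit sequence
	 * counter on 32-bit x86.  On Pentium and later, cmpxchg64()
	 * uses a LOCK'd cmpxchg8b loop; on 386/486 it dispatches at
	 * runtime to cmpxchg_486_u64(), which disables interrupts
	 * around a plain load/compare/store.
	 */
	static u64 seq64;

	static u64 inc_seq64(void)
	{
		u64 old, new;

		do {
			old = seq64;
			new = old + 1;
		} while (cmpxchg64(&seq64, old, new) != old);

		return new;
	}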