Mirror of https://github.com/microsoft/STL.git
<atomic> refactoring to support C11 (#2846)
This commit is contained in:
Parent: d066aef441
Commit: 97b476c17f
Changed file: stl/inc/atomic (503 changed lines)
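For context before the diff: the refactoring routes all memory-order plumbing through a plain, C-compatible enum (_Atomic_memory_order_relaxed .. _Atomic_memory_order_seq_cst) so the helpers can be shared with vcruntime and reused by a future C11 <stdatomic.h>. A minimal sketch (not code from this commit) of the numeric correspondence that makes the static_cast<unsigned int> bridging below valid:

// Sketch: the C++ memory_order enumerators map onto the values 0..5 used by the
// new _Atomic_memory_order_* enum, so casting to unsigned int preserves meaning.
#include <atomic>

static_assert(static_cast<unsigned int>(std::memory_order_relaxed) == 0);
static_assert(static_cast<unsigned int>(std::memory_order_consume) == 1);
static_assert(static_cast<unsigned int>(std::memory_order_acquire) == 2);
static_assert(static_cast<unsigned int>(std::memory_order_release) == 3);
static_assert(static_cast<unsigned int>(std::memory_order_acq_rel) == 4);
static_assert(static_cast<unsigned int>(std::memory_order_seq_cst) == 5);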
@@ -29,26 +29,6 @@ _STL_DISABLE_CLANG_WARNINGS
#pragma push_macro("new")
#undef new

#define _Compiler_barrier() _STL_DISABLE_DEPRECATED_WARNING _ReadWriteBarrier() _STL_RESTORE_DEPRECATED_WARNING

#if defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
#define _Memory_barrier() __dmb(0xB) // inner shared data memory barrier
#define _Compiler_or_memory_barrier() _Memory_barrier()
#elif defined(_M_IX86) || defined(_M_X64)
// x86/x64 hardware only emits memory barriers inside _Interlocked intrinsics
#define _Compiler_or_memory_barrier() _Compiler_barrier()
#else // ^^^ x86/x64 / unsupported hardware vvv
#error Unsupported hardware
#endif // hardware

#ifndef _INVALID_MEMORY_ORDER
#ifdef _DEBUG
#define _INVALID_MEMORY_ORDER _STL_REPORT_ERROR("Invalid memory order")
#else // ^^^ _DEBUG / !_DEBUG vvv
#define _INVALID_MEMORY_ORDER
#endif // _DEBUG
#endif // _INVALID_MEMORY_ORDER

#ifdef _WIN64
#if _STD_ATOMIC_ALWAYS_USE_CMPXCHG16B == 1
#define _STD_COMPARE_EXCHANGE_128 _InterlockedCompareExchange128

@@ -73,33 +53,6 @@ extern "C" _NODISCARD char __stdcall __std_atomic_has_cmpxchg16b() noexcept;
#define _ATOMIC_HAS_DCAS 0
#endif // _STD_ATOMIC_ALWAYS_USE_CMPXCHG16B == 1 || !defined(_M_X64) || defined(_M_ARM64EC)

#if defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC))
#define _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _Intrinsic, ...) \
    _Check_memory_order(_Order); \
    _Result = _Intrinsic(__VA_ARGS__)
#elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
#define _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _Intrinsic, ...) \
    switch (_Order) { \
    case memory_order_relaxed: \
        _Result = _INTRIN_RELAXED(_Intrinsic)(__VA_ARGS__); \
        break; \
    case memory_order_consume: \
    case memory_order_acquire: \
        _Result = _INTRIN_ACQUIRE(_Intrinsic)(__VA_ARGS__); \
        break; \
    case memory_order_release: \
        _Result = _INTRIN_RELEASE(_Intrinsic)(__VA_ARGS__); \
        break; \
    default: \
        _INVALID_MEMORY_ORDER; \
        /* [[fallthrough]]; */ \
    case memory_order_acq_rel: \
    case memory_order_seq_cst: \
        _Result = _Intrinsic(__VA_ARGS__); \
        break; \
    }
#endif // hardware

#define ATOMIC_BOOL_LOCK_FREE 2
#define ATOMIC_CHAR_LOCK_FREE 2
#ifdef __cpp_lib_char8_t

@@ -114,6 +67,161 @@ extern "C" _NODISCARD char __stdcall __std_atomic_has_cmpxchg16b() noexcept;
#define ATOMIC_LLONG_LOCK_FREE 2
#define ATOMIC_POINTER_LOCK_FREE 2

// The following code is SHARED with vcruntime and any updates
// should be mirrored. Also: if any macros are added they should be
// #undefed in vcruntime as well

enum {
    _Atomic_memory_order_relaxed,
    _Atomic_memory_order_consume,
    _Atomic_memory_order_acquire,
    _Atomic_memory_order_release,
    _Atomic_memory_order_acq_rel,
    _Atomic_memory_order_seq_cst,
};

#ifndef _INVALID_MEMORY_ORDER
#ifdef _DEBUG
#define _INVALID_MEMORY_ORDER _STL_REPORT_ERROR("Invalid memory order")
#else // ^^^ _DEBUG / !_DEBUG vvv
#define _INVALID_MEMORY_ORDER
#endif // _DEBUG
#endif // _INVALID_MEMORY_ORDER

extern "C" inline void _Check_memory_order(const unsigned int _Order) noexcept {
    if (_Order > _Atomic_memory_order_seq_cst) {
        _INVALID_MEMORY_ORDER;
    }
}

#define _Compiler_barrier() _STL_DISABLE_DEPRECATED_WARNING _ReadWriteBarrier() _STL_RESTORE_DEPRECATED_WARNING

#if defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
#define _Memory_barrier() __dmb(0xB) // inner shared data memory barrier
#define _Compiler_or_memory_barrier() _Memory_barrier()
#elif defined(_M_IX86) || defined(_M_X64)
// x86/x64 hardware only emits memory barriers inside _Interlocked intrinsics
#define _Compiler_or_memory_barrier() _Compiler_barrier()
#else // ^^^ x86/x64 / unsupported hardware vvv
#error Unsupported hardware
#endif // hardware

#if defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC))
#define _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _Intrinsic, ...) \
    _Check_memory_order(_Order); \
    _Result = _Intrinsic(__VA_ARGS__)
#elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
#define _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _Intrinsic, ...) \
    switch (_Order) { \
    case _Atomic_memory_order_relaxed: \
        _Result = _INTRIN_RELAXED(_Intrinsic)(__VA_ARGS__); \
        break; \
    case _Atomic_memory_order_consume: \
    case _Atomic_memory_order_acquire: \
        _Result = _INTRIN_ACQUIRE(_Intrinsic)(__VA_ARGS__); \
        break; \
    case _Atomic_memory_order_release: \
        _Result = _INTRIN_RELEASE(_Intrinsic)(__VA_ARGS__); \
        break; \
    default: \
        _INVALID_MEMORY_ORDER; \
        /* [[fallthrough]]; */ \
    case _Atomic_memory_order_acq_rel: \
    case _Atomic_memory_order_seq_cst: \
        _Result = _Intrinsic(__VA_ARGS__); \
        break; \
    }
#endif // hardware

// note: these macros are _not_ always safe to use with a trailing semicolon,
// we avoid wrapping them in do {} while (0) because MSVC generates code for such loops
// in debug mode.
#define _ATOMIC_LOAD_VERIFY_MEMORY_ORDER(_Order_var) \
    switch (_Order_var) { \
    case _Atomic_memory_order_relaxed: \
        break; \
    case _Atomic_memory_order_consume: \
    case _Atomic_memory_order_acquire: \
    case _Atomic_memory_order_seq_cst: \
        _Compiler_or_memory_barrier(); \
        break; \
    case _Atomic_memory_order_release: \
    case _Atomic_memory_order_acq_rel: \
    default: \
        _INVALID_MEMORY_ORDER; \
        break; \
    }

#define _ATOMIC_STORE_PREFIX(_Width, _Ptr, _Desired) \
    case _Atomic_memory_order_relaxed: \
        __iso_volatile_store##_Width((_Ptr), (_Desired)); \
        return; \
    case _Atomic_memory_order_release: \
        _Compiler_or_memory_barrier(); \
        __iso_volatile_store##_Width((_Ptr), (_Desired)); \
        return; \
    default: \
    case _Atomic_memory_order_consume: \
    case _Atomic_memory_order_acquire: \
    case _Atomic_memory_order_acq_rel: \
        _INVALID_MEMORY_ORDER; \
        /* [[fallthrough]]; */

#define _ATOMIC_STORE_SEQ_CST_ARM(_Width, _Ptr, _Desired) \
    _Memory_barrier(); \
    __iso_volatile_store##_Width((_Ptr), (_Desired)); \
    _Memory_barrier();
#define _ATOMIC_STORE_SEQ_CST_X86_X64(_Width, _Ptr, _Desired) (void) _InterlockedExchange##_Width((_Ptr), (_Desired));
#define _ATOMIC_STORE_32_SEQ_CST_X86_X64(_Ptr, _Desired) \
    (void) _InterlockedExchange(reinterpret_cast<volatile long*>(_Ptr), static_cast<long>(_Desired));

#define _ATOMIC_STORE_64_SEQ_CST_IX86(_Ptr, _Desired) \
    _Compiler_barrier(); \
    __iso_volatile_store64((_Ptr), (_Desired)); \
    _Atomic_thread_fence(_Atomic_memory_order_seq_cst);

#if defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
#define _ATOMIC_STORE_SEQ_CST(_Width, _Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM(_Width, (_Ptr), (_Desired))
#define _ATOMIC_STORE_32_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM(32, (_Ptr), (_Desired))
#define _ATOMIC_STORE_64_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM(64, (_Ptr), (_Desired))
#else // ^^^ ARM32/ARM64/ARM64EC hardware / x86/x64 hardware vvv
#define _ATOMIC_STORE_SEQ_CST(_Width, _Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_X86_X64(_Width, (_Ptr), (_Desired))
#define _ATOMIC_STORE_32_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_32_SEQ_CST_X86_X64((_Ptr), (_Desired))
#ifdef _M_IX86
#define _ATOMIC_STORE_64_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_64_SEQ_CST_IX86((_Ptr), (_Desired))
#else // ^^^ x86 / x64 vvv
#define _ATOMIC_STORE_64_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_X86_X64(64, (_Ptr), (_Desired))
#endif // x86/x64
#endif // hardware

extern "C" inline void _Atomic_thread_fence(const unsigned int _Order) noexcept {
    if (_Order == _Atomic_memory_order_relaxed) {
        return;
    }

#if defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC))
    _Compiler_barrier();
    if (_Order == _Atomic_memory_order_seq_cst) {
        volatile long _Guard; // Not initialized to avoid an unnecessary operation; the value does not matter

        // _mm_mfence could have been used, but it is not supported on older x86 CPUs and is slower on some recent CPUs.
        // The memory fence provided by interlocked operations has some exceptions, but this is fine:
        // std::atomic_thread_fence works with respect to other atomics only; it may not be a full fence for all ops.
#pragma warning(suppress : 6001) // "Using uninitialized memory '_Guard'"
#pragma warning(suppress : 28113) // "Accessing a local variable _Guard via an Interlocked function: This is an unusual
                                  // usage which could be reconsidered."
        (void) _InterlockedIncrement(&_Guard);
        _Compiler_barrier();
    }
#elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
    _Memory_barrier();
#else // ^^^ ARM32/ARM64/ARM64EC / unsupported hardware vvv
#error Unsupported hardware
#endif // unsupported hardware
}
// End of code shared with vcruntime

_EXTERN_C
_Smtx_t* __stdcall __std_atomic_get_mutex(const void* _Key) noexcept;
_END_EXTERN_C
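For reference, a sketch (not literal code from the commit) of what a call such as _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _As_bytes, _InterlockedExchange8, _Mem, _Val) expands to on ARM-family targets; _Mem and _Val are placeholder arguments, and on x86/x64 the macro only checks the order and calls the plain intrinsic:

switch (static_cast<unsigned int>(_Order)) {
case _Atomic_memory_order_relaxed:
    _As_bytes = _INTRIN_RELAXED(_InterlockedExchange8)(_Mem, _Val); // _nf ("no fence") variant
    break;
case _Atomic_memory_order_consume:
case _Atomic_memory_order_acquire:
    _As_bytes = _INTRIN_ACQUIRE(_InterlockedExchange8)(_Mem, _Val); // _acq variant
    break;
case _Atomic_memory_order_release:
    _As_bytes = _INTRIN_RELEASE(_InterlockedExchange8)(_Mem, _Val); // _rel variant
    break;
default:
    _INVALID_MEMORY_ORDER;
    /* [[fallthrough]]; */
case _Atomic_memory_order_acq_rel:
case _Atomic_memory_order_seq_cst:
    _As_bytes = _InterlockedExchange8(_Mem, _Val); // fully fenced variant
    break;
}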
@@ -164,29 +272,7 @@ inline constexpr bool _Might_have_non_value_bits =
#endif // _CMPXCHG_MASK_OUT_PADDING_BITS

extern "C" inline void atomic_thread_fence(const memory_order _Order) noexcept {
    if (_Order == memory_order_relaxed) {
        return;
    }

#if defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC))
    _Compiler_barrier();
    if (_Order == memory_order_seq_cst) {
        volatile long _Guard; // Not initialized to avoid an unnecessary operation; the value does not matter

        // _mm_mfence could have been used, but it is not supported on older x86 CPUs and is slower on some recent CPUs.
        // The memory fence provided by interlocked operations has some exceptions, but this is fine:
        // std::atomic_thread_fence works with respect to other atomics only; it may not be a full fence for all ops.
#pragma warning(suppress : 6001) // "Using uninitialized memory '_Guard'"
#pragma warning(suppress : 28113) // "Accessing a local variable _Guard via an Interlocked function: This is an unusual
                                  // usage which could be reconsidered."
        (void) _InterlockedIncrement(&_Guard);
        _Compiler_barrier();
    }
#elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
    _Memory_barrier();
#else // ^^^ ARM32/ARM64/ARM64EC / unsupported hardware vvv
#error Unsupported hardware
#endif // unsupported hardware
    ::_Atomic_thread_fence(static_cast<unsigned int>(_Order));
}

extern "C" inline void atomic_signal_fence(const memory_order _Order) noexcept {

@@ -200,13 +286,6 @@ _Ty kill_dependency(_Ty _Arg) noexcept { // "magic" template that kills dependen
    return _Arg;
}

inline void _Check_memory_order(const memory_order _Order) noexcept {
    // check that _Order is a valid memory_order
    if (static_cast<unsigned int>(_Order) > static_cast<unsigned int>(memory_order_seq_cst)) {
        _INVALID_MEMORY_ORDER;
    }
}

inline void _Check_store_memory_order(const memory_order _Order) noexcept {
    switch (_Order) {
    case memory_order_relaxed:

@@ -266,7 +345,7 @@ _NODISCARD inline memory_order _Combine_cas_memory_orders(
        {memory_order_seq_cst, memory_order_seq_cst, memory_order_seq_cst, memory_order_seq_cst, memory_order_seq_cst,
            memory_order_seq_cst}};

    _Check_memory_order(_Success);
    _Check_memory_order(static_cast<unsigned int>(_Success));
    _Check_load_memory_order(_Failure);
    return _Combined_memory_orders[static_cast<int>(_Success)][static_cast<int>(_Failure)];
}

@@ -286,13 +365,6 @@ _NODISCARD _Integral _Atomic_reinterpret_as(const _Ty& _Source) noexcept {
    }
}

inline void _Load_barrier(const memory_order _Order) noexcept { // implement memory barrier for atomic load functions
    _Check_load_memory_order(_Order);
    if (_Order != memory_order_relaxed) {
        _Compiler_or_memory_barrier();
    }
}

#if 1 // TRANSITION, ABI
template <class _Ty>
struct _Atomic_padded {

@@ -510,7 +582,7 @@ struct _Atomic_storage {

    _TVal exchange(const _TVal _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
        // exchange _Value with _Storage with sequential consistency
        _Check_memory_order(_Order);
        _Check_memory_order(static_cast<unsigned int>(_Order));
        _Guard _Lock{_Spinlock};
        _TVal _Result(_Storage);
        _Storage = _Value;

@@ -519,7 +591,7 @@ struct _Atomic_storage {

    bool compare_exchange_strong(_TVal& _Expected, const _TVal _Desired,
        const memory_order _Order = memory_order_seq_cst) noexcept { // CAS with sequential consistency, plain
        _Check_memory_order(_Order);
        _Check_memory_order(static_cast<unsigned int>(_Order));
        const auto _Storage_ptr = _STD addressof(_Storage);
        const auto _Expected_ptr = _STD addressof(_Expected);
        bool _Result;

@@ -629,33 +701,15 @@ struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics
    void store(const _TVal _Value) noexcept { // store with sequential consistency
        const auto _Mem = _Atomic_address_as<char>(_Storage);
        const char _As_bytes = _Atomic_reinterpret_as<char>(_Value);
#if defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
        _Memory_barrier();
        __iso_volatile_store8(_Mem, _As_bytes);
        _Memory_barrier();
#else // ^^^ ARM32/ARM64/ARM64EC hardware / x86/x64 hardware vvv
        (void) _InterlockedExchange8(_Mem, _As_bytes);
#endif // hardware
        _ATOMIC_STORE_SEQ_CST(8, _Mem, _As_bytes)
    }

    void store(const _TVal _Value, const memory_order _Order) noexcept { // store with given memory order
        const auto _Mem = _Atomic_address_as<char>(_Storage);
        const char _As_bytes = _Atomic_reinterpret_as<char>(_Value);
        switch (_Order) {
        case memory_order_relaxed:
            __iso_volatile_store8(_Mem, _As_bytes);
            return;
        case memory_order_release:
            _Compiler_or_memory_barrier();
            __iso_volatile_store8(_Mem, _As_bytes);
            return;
        default:
        case memory_order_consume:
        case memory_order_acquire:
        case memory_order_acq_rel:
            _INVALID_MEMORY_ORDER;
            // [[fallthrough]];
        case memory_order_seq_cst:
        switch (static_cast<unsigned int>(_Order)) {
            _ATOMIC_STORE_PREFIX(8, _Mem, _As_bytes)
        case _Atomic_memory_order_seq_cst:
            store(_Value);
            return;
        }
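For reference, once _ATOMIC_STORE_PREFIX(8, _Mem, _As_bytes) is substituted, the new store(_Value, _Order) above is roughly equivalent to the following switch (the prefix is kept as a macro so the same cases can be reused for the 16-, 32- and 64-bit widths):

switch (static_cast<unsigned int>(_Order)) {
case _Atomic_memory_order_relaxed:
    __iso_volatile_store8(_Mem, _As_bytes);
    return;
case _Atomic_memory_order_release:
    _Compiler_or_memory_barrier();
    __iso_volatile_store8(_Mem, _As_bytes);
    return;
default:
case _Atomic_memory_order_consume:
case _Atomic_memory_order_acquire:
case _Atomic_memory_order_acq_rel:
    _INVALID_MEMORY_ORDER;
    /* [[fallthrough]]; */
case _Atomic_memory_order_seq_cst:
    store(_Value); // sequentially consistent store via _ATOMIC_STORE_SEQ_CST
    return;
}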
@@ -671,15 +725,15 @@ struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics
    _NODISCARD _TVal load(const memory_order _Order) const noexcept { // load with given memory order
        const auto _Mem = _Atomic_address_as<char>(_Storage);
        char _As_bytes = __iso_volatile_load8(_Mem);
        _Load_barrier(_Order);
        _ATOMIC_LOAD_VERIFY_MEMORY_ORDER(static_cast<unsigned int>(_Order))
        return reinterpret_cast<_TVal&>(_As_bytes);
    }

    _TVal exchange(const _TVal _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
        // exchange with given memory order
        char _As_bytes;
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _As_bytes, _InterlockedExchange8, _Atomic_address_as<char>(_Storage),
            _Atomic_reinterpret_as<char>(_Value));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _As_bytes, _InterlockedExchange8,
            _Atomic_address_as<char>(_Storage), _Atomic_reinterpret_as<char>(_Value));
        return reinterpret_cast<_TVal&>(_As_bytes);
    }

@@ -694,7 +748,7 @@ struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics
        const char _Mask_val = _Atomic_reinterpret_as<char>(_Mask._Ref());

        for (;;) {
            _ATOMIC_CHOOSE_INTRINSIC(_Order, _Prev_bytes, _InterlockedCompareExchange8,
            _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Prev_bytes, _InterlockedCompareExchange8,
                _Atomic_address_as<char>(_Storage), _Atomic_reinterpret_as<char>(_Desired), _Expected_bytes);
            if (_Prev_bytes == _Expected_bytes) {
                return true;

@@ -708,8 +762,8 @@ struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics
            }
        }
#endif // _CMPXCHG_MASK_OUT_PADDING_BITS
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Prev_bytes, _InterlockedCompareExchange8, _Atomic_address_as<char>(_Storage),
            _Atomic_reinterpret_as<char>(_Desired), _Expected_bytes);
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Prev_bytes, _InterlockedCompareExchange8,
            _Atomic_address_as<char>(_Storage), _Atomic_reinterpret_as<char>(_Desired), _Expected_bytes);
        if (_Prev_bytes == _Expected_bytes) {
            return true;
        }

@@ -750,33 +804,15 @@ struct _Atomic_storage<_Ty, 2> { // lock-free using 2-byte intrinsics
    void store(const _TVal _Value) noexcept { // store with sequential consistency
        const auto _Mem = _Atomic_address_as<short>(_Storage);
        const short _As_bytes = _Atomic_reinterpret_as<short>(_Value);
#if defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
        _Memory_barrier();
        __iso_volatile_store16(_Mem, _As_bytes);
        _Memory_barrier();
#else // ^^^ ARM32/ARM64/ARM64EC hardware / x86/x64 hardware vvv
        (void) _InterlockedExchange16(_Mem, _As_bytes);
#endif // hardware
        _ATOMIC_STORE_SEQ_CST(16, _Mem, _As_bytes)
    }

    void store(const _TVal _Value, const memory_order _Order) noexcept { // store with given memory order
        const auto _Mem = _Atomic_address_as<short>(_Storage);
        const short _As_bytes = _Atomic_reinterpret_as<short>(_Value);
        switch (_Order) {
        case memory_order_relaxed:
            __iso_volatile_store16(_Mem, _As_bytes);
            return;
        case memory_order_release:
            _Compiler_or_memory_barrier();
            __iso_volatile_store16(_Mem, _As_bytes);
            return;
        default:
        case memory_order_consume:
        case memory_order_acquire:
        case memory_order_acq_rel:
            _INVALID_MEMORY_ORDER;
            // [[fallthrough]];
        case memory_order_seq_cst:
        switch (static_cast<unsigned int>(_Order)) {
            _ATOMIC_STORE_PREFIX(16, _Mem, _As_bytes)
        case _Atomic_memory_order_seq_cst:
            store(_Value);
            return;
        }

@@ -792,15 +828,15 @@ struct _Atomic_storage<_Ty, 2> { // lock-free using 2-byte intrinsics
    _NODISCARD _TVal load(const memory_order _Order) const noexcept { // load with given memory order
        const auto _Mem = _Atomic_address_as<short>(_Storage);
        short _As_bytes = __iso_volatile_load16(_Mem);
        _Load_barrier(_Order);
        _ATOMIC_LOAD_VERIFY_MEMORY_ORDER(static_cast<unsigned int>(_Order))
        return reinterpret_cast<_TVal&>(_As_bytes);
    }

    _TVal exchange(const _TVal _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
        // exchange with given memory order
        short _As_bytes;
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _As_bytes, _InterlockedExchange16, _Atomic_address_as<short>(_Storage),
            _Atomic_reinterpret_as<short>(_Value));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _As_bytes, _InterlockedExchange16,
            _Atomic_address_as<short>(_Storage), _Atomic_reinterpret_as<short>(_Value));
        return reinterpret_cast<_TVal&>(_As_bytes);
    }

@@ -814,7 +850,7 @@ struct _Atomic_storage<_Ty, 2> { // lock-free using 2-byte intrinsics
        const short _Mask_val = _Atomic_reinterpret_as<short>(_Mask._Ref());

        for (;;) {
            _ATOMIC_CHOOSE_INTRINSIC(_Order, _Prev_bytes, _InterlockedCompareExchange16,
            _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Prev_bytes, _InterlockedCompareExchange16,
                _Atomic_address_as<short>(_Storage), _Atomic_reinterpret_as<short>(_Desired), _Expected_bytes);
            if (_Prev_bytes == _Expected_bytes) {
                return true;

@@ -828,7 +864,7 @@ struct _Atomic_storage<_Ty, 2> { // lock-free using 2-byte intrinsics
            }
        }
#endif // _CMPXCHG_MASK_OUT_PADDING_BITS
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Prev_bytes, _InterlockedCompareExchange16,
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Prev_bytes, _InterlockedCompareExchange16,
            _Atomic_address_as<short>(_Storage), _Atomic_reinterpret_as<short>(_Desired), _Expected_bytes);
        if (_Prev_bytes == _Expected_bytes) {
            return true;

@@ -868,33 +904,17 @@ struct _Atomic_storage<_Ty, 4> { // lock-free using 4-byte intrinsics
    }

    void store(const _TVal _Value) noexcept { // store with sequential consistency
#if defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
        _Memory_barrier();
        __iso_volatile_store32(_Atomic_address_as<int>(_Storage), _Atomic_reinterpret_as<int>(_Value));
        _Memory_barrier();
#else // ^^^ ARM32/ARM64/ARM64EC hardware / x86/x64 hardware vvv
        (void) _InterlockedExchange(_Atomic_address_as<long>(_Storage), _Atomic_reinterpret_as<long>(_Value));
#endif // hardware
        const auto _Mem = _Atomic_address_as<int>(_Storage);
        const int _As_bytes = _Atomic_reinterpret_as<int>(_Value);
        _ATOMIC_STORE_32_SEQ_CST(_Mem, _As_bytes)
    }

    void store(const _TVal _Value, const memory_order _Order) noexcept { // store with given memory order
        const auto _Mem = _Atomic_address_as<int>(_Storage);
        const int _As_bytes = _Atomic_reinterpret_as<int>(_Value);
        switch (_Order) {
        case memory_order_relaxed:
            __iso_volatile_store32(_Mem, _As_bytes);
            return;
        case memory_order_release:
            _Compiler_or_memory_barrier();
            __iso_volatile_store32(_Mem, _As_bytes);
            return;
        default:
        case memory_order_consume:
        case memory_order_acquire:
        case memory_order_acq_rel:
            _INVALID_MEMORY_ORDER;
            // [[fallthrough]];
        case memory_order_seq_cst:
        switch (static_cast<unsigned int>(_Order)) {
            _ATOMIC_STORE_PREFIX(32, _Mem, _As_bytes)
        case _Atomic_memory_order_seq_cst:
            store(_Value);
            return;
        }

@@ -902,23 +922,23 @@ struct _Atomic_storage<_Ty, 4> { // lock-free using 4-byte intrinsics

    _NODISCARD _TVal load() const noexcept { // load with sequential consistency
        const auto _Mem = _Atomic_address_as<int>(_Storage);
        auto _As_bytes = __iso_volatile_load32(_Mem);
        int _As_bytes = __iso_volatile_load32(_Mem);
        _Compiler_or_memory_barrier();
        return reinterpret_cast<_TVal&>(_As_bytes);
    }

    _NODISCARD _TVal load(const memory_order _Order) const noexcept { // load with given memory order
        const auto _Mem = _Atomic_address_as<int>(_Storage);
        auto _As_bytes = __iso_volatile_load32(_Mem);
        _Load_barrier(_Order);
        int _As_bytes = __iso_volatile_load32(_Mem);
        _ATOMIC_LOAD_VERIFY_MEMORY_ORDER(static_cast<unsigned int>(_Order))
        return reinterpret_cast<_TVal&>(_As_bytes);
    }

    _TVal exchange(const _TVal _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
        // exchange with given memory order
        long _As_bytes;
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _As_bytes, _InterlockedExchange, _Atomic_address_as<long>(_Storage),
            _Atomic_reinterpret_as<long>(_Value));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _As_bytes, _InterlockedExchange,
            _Atomic_address_as<long>(_Storage), _Atomic_reinterpret_as<long>(_Value));
        return reinterpret_cast<_TVal&>(_As_bytes);
    }

@@ -932,7 +952,7 @@ struct _Atomic_storage<_Ty, 4> { // lock-free using 4-byte intrinsics
        const long _Mask_val = _Atomic_reinterpret_as<long>(_Mask);

        for (;;) {
            _ATOMIC_CHOOSE_INTRINSIC(_Order, _Prev_bytes, _InterlockedCompareExchange,
            _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Prev_bytes, _InterlockedCompareExchange,
                _Atomic_address_as<long>(_Storage), _Atomic_reinterpret_as<long>(_Desired), _Expected_bytes);
            if (_Prev_bytes == _Expected_bytes) {
                return true;

@@ -946,8 +966,8 @@ struct _Atomic_storage<_Ty, 4> { // lock-free using 4-byte intrinsics
            }
        }
#endif // _CMPXCHG_MASK_OUT_PADDING_BITS
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Prev_bytes, _InterlockedCompareExchange, _Atomic_address_as<long>(_Storage),
            _Atomic_reinterpret_as<long>(_Desired), _Expected_bytes);
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Prev_bytes, _InterlockedCompareExchange,
            _Atomic_address_as<long>(_Storage), _Atomic_reinterpret_as<long>(_Desired), _Expected_bytes);
        if (_Prev_bytes == _Expected_bytes) {
            return true;
        }

@@ -988,37 +1008,15 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics
    void store(const _TVal _Value) noexcept { // store with sequential consistency
        const auto _Mem = _Atomic_address_as<long long>(_Storage);
        const long long _As_bytes = _Atomic_reinterpret_as<long long>(_Value);
#if defined(_M_IX86)
        _Compiler_barrier();
        __iso_volatile_store64(_Mem, _As_bytes);
        _STD atomic_thread_fence(memory_order_seq_cst);
#elif defined(_M_ARM64) || defined(_M_ARM64EC)
        _Memory_barrier();
        __iso_volatile_store64(_Mem, _As_bytes);
        _Memory_barrier();
#else // ^^^ _M_ARM64, _M_ARM64EC / ARM32, x64 vvv
        (void) _InterlockedExchange64(_Mem, _As_bytes);
#endif // ^^^ ARM32, x64 ^^^
        _ATOMIC_STORE_64_SEQ_CST(_Mem, _As_bytes);
    }

    void store(const _TVal _Value, const memory_order _Order) noexcept { // store with given memory order
        const auto _Mem = _Atomic_address_as<long long>(_Storage);
        const long long _As_bytes = _Atomic_reinterpret_as<long long>(_Value);
        switch (_Order) {
        case memory_order_relaxed:
            __iso_volatile_store64(_Mem, _As_bytes);
            return;
        case memory_order_release:
            _Compiler_or_memory_barrier();
            __iso_volatile_store64(_Mem, _As_bytes);
            return;
        default:
        case memory_order_consume:
        case memory_order_acquire:
        case memory_order_acq_rel:
            _INVALID_MEMORY_ORDER;
            // [[fallthrough]];
        case memory_order_seq_cst:
        switch (static_cast<unsigned int>(_Order)) {
            _ATOMIC_STORE_PREFIX(64, _Mem, _As_bytes)
        case _Atomic_memory_order_seq_cst:
            store(_Value);
            return;
        }
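A note on the 32-bit x86 path above: as in the old code, the header avoids a 64-bit interlocked exchange on x86 for the sequentially consistent store and instead goes through _ATOMIC_STORE_64_SEQ_CST_IX86, which (per the shared macro block earlier in the diff) expands roughly to:

_Compiler_barrier();                                    // keep the compiler from reordering around the store
__iso_volatile_store64(_Mem, _As_bytes);                // plain 8-byte store
_Atomic_thread_fence(_Atomic_memory_order_seq_cst);     // trailing full fence supplies seq_cst ordering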
@@ -1026,14 +1024,12 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics

    _NODISCARD _TVal load() const noexcept { // load with sequential consistency
        const auto _Mem = _Atomic_address_as<long long>(_Storage);
        long long _As_bytes;
#ifdef _M_ARM
        _As_bytes = __ldrexd(_Mem);
        _Memory_barrier();
        long long _As_bytes = __ldrexd(_Mem);
#else
        _As_bytes = __iso_volatile_load64(_Mem);
        _Compiler_or_memory_barrier();
        long long _As_bytes = __iso_volatile_load64(_Mem);
#endif
        _Compiler_or_memory_barrier();
        return reinterpret_cast<_TVal&>(_As_bytes);
    }

@@ -1044,7 +1040,7 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics
#else
        long long _As_bytes = __iso_volatile_load64(_Mem);
#endif
        _Load_barrier(_Order);
        _ATOMIC_LOAD_VERIFY_MEMORY_ORDER(static_cast<unsigned int>(_Order))
        return reinterpret_cast<_TVal&>(_As_bytes);
    }

@@ -1061,8 +1057,8 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics
    _TVal exchange(const _TVal _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
        // exchange with given memory order
        long long _As_bytes;
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _As_bytes, _InterlockedExchange64, _Atomic_address_as<long long>(_Storage),
            _Atomic_reinterpret_as<long long>(_Value));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _As_bytes, _InterlockedExchange64,
            _Atomic_address_as<long long>(_Storage), _Atomic_reinterpret_as<long long>(_Value));
        return reinterpret_cast<_TVal&>(_As_bytes);
    }
#endif // _M_IX86

@@ -1078,7 +1074,7 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics
        const long long _Mask_val = _Atomic_reinterpret_as<long long>(_Mask);

        for (;;) {
            _ATOMIC_CHOOSE_INTRINSIC(_Order, _Prev_bytes, _InterlockedCompareExchange64,
            _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Prev_bytes, _InterlockedCompareExchange64,
                _Atomic_address_as<long long>(_Storage), _Atomic_reinterpret_as<long long>(_Desired),
                _Expected_bytes);
            if (_Prev_bytes == _Expected_bytes) {

@@ -1093,7 +1089,7 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics
            }
        }
#endif // _CMPXCHG_MASK_OUT_PADDING_BITS
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Prev_bytes, _InterlockedCompareExchange64,
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Prev_bytes, _InterlockedCompareExchange64,
            _Atomic_address_as<long long>(_Storage), _Atomic_reinterpret_as<long long>(_Desired), _Expected_bytes);
        if (_Prev_bytes == _Expected_bytes) {
            return true;

@@ -1209,7 +1205,7 @@ struct _Atomic_storage<_Ty&, 16> { // lock-free using 16-byte intrinsics
        _CSTD memcpy(&_Mask_val, _Mask._Ptr(), sizeof(_TVal));
        for (;;) {
#if defined(_M_ARM64) || defined(_M_ARM64EC)
            _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedCompareExchange128,
            _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedCompareExchange128,
                _Atomic_address_as<long long>(_Storage), _Desired_bytes._High, _Desired_bytes._Low,
                &_Expected_temp._Low);
#else // ^^^ _M_ARM64, _M_ARM64EC / _M_X64 vvv

@@ -1235,7 +1231,7 @@ struct _Atomic_storage<_Ty&, 16> { // lock-free using 16-byte intrinsics
        }
#endif // _CMPXCHG_MASK_OUT_PADDING_BITS
#if defined(_M_ARM64) || defined(_M_ARM64EC)
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedCompareExchange128,
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedCompareExchange128,
            _Atomic_address_as<long long>(_Storage), _Desired_bytes._High, _Desired_bytes._Low, &_Expected_temp._Low);
#else // ^^^ _M_ARM64, _M_ARM64EC / _M_X64 vvv
        (void) _Order;

@@ -1310,29 +1306,29 @@ struct _Atomic_integral<_Ty, 1> : _Atomic_storage<_Ty> { // atomic integral oper

    _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        char _Result;
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd8, _Atomic_address_as<char>(this->_Storage),
            static_cast<char>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd8,
            _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
        return static_cast<_TVal>(_Result);
    }

    _TVal fetch_and(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        char _Result;
        _ATOMIC_CHOOSE_INTRINSIC(
            _Order, _Result, _InterlockedAnd8, _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedAnd8,
            _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
        return static_cast<_TVal>(_Result);
    }

    _TVal fetch_or(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        char _Result;
        _ATOMIC_CHOOSE_INTRINSIC(
            _Order, _Result, _InterlockedOr8, _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedOr8,
            _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
        return static_cast<_TVal>(_Result);
    }

    _TVal fetch_xor(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        char _Result;
        _ATOMIC_CHOOSE_INTRINSIC(
            _Order, _Result, _InterlockedXor8, _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedXor8,
            _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
        return static_cast<_TVal>(_Result);
    }

@@ -1368,29 +1364,29 @@ struct _Atomic_integral<_Ty, 2> : _Atomic_storage<_Ty> { // atomic integral oper

    _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        short _Result;
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd16, _Atomic_address_as<short>(this->_Storage),
            static_cast<short>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd16,
            _Atomic_address_as<short>(this->_Storage), static_cast<short>(_Operand));
        return static_cast<_TVal>(_Result);
    }

    _TVal fetch_and(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        short _Result;
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedAnd16, _Atomic_address_as<short>(this->_Storage),
            static_cast<short>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedAnd16,
            _Atomic_address_as<short>(this->_Storage), static_cast<short>(_Operand));
        return static_cast<_TVal>(_Result);
    }

    _TVal fetch_or(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        short _Result;
        _ATOMIC_CHOOSE_INTRINSIC(
            _Order, _Result, _InterlockedOr16, _Atomic_address_as<short>(this->_Storage), static_cast<short>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedOr16,
            _Atomic_address_as<short>(this->_Storage), static_cast<short>(_Operand));
        return static_cast<_TVal>(_Result);
    }

    _TVal fetch_xor(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        short _Result;
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedXor16, _Atomic_address_as<short>(this->_Storage),
            static_cast<short>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedXor16,
            _Atomic_address_as<short>(this->_Storage), static_cast<short>(_Operand));
        return static_cast<_TVal>(_Result);
    }

@@ -1426,29 +1422,29 @@ struct _Atomic_integral<_Ty, 4> : _Atomic_storage<_Ty> { // atomic integral oper

    _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        long _Result;
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd, _Atomic_address_as<long>(this->_Storage),
            static_cast<long>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd,
            _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
        return static_cast<_TVal>(_Result);
    }

    _TVal fetch_and(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        long _Result;
        _ATOMIC_CHOOSE_INTRINSIC(
            _Order, _Result, _InterlockedAnd, _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedAnd,
            _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
        return static_cast<_TVal>(_Result);
    }

    _TVal fetch_or(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        long _Result;
        _ATOMIC_CHOOSE_INTRINSIC(
            _Order, _Result, _InterlockedOr, _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedOr,
            _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
        return static_cast<_TVal>(_Result);
    }

    _TVal fetch_xor(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        long _Result;
        _ATOMIC_CHOOSE_INTRINSIC(
            _Order, _Result, _InterlockedXor, _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedXor,
            _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
        return static_cast<_TVal>(_Result);
    }

@@ -1538,29 +1534,29 @@ struct _Atomic_integral<_Ty, 8> : _Atomic_storage<_Ty> { // atomic integral oper
#else // ^^^ _M_IX86 / !_M_IX86 vvv
    _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        long long _Result;
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd64,
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd64,
            _Atomic_address_as<long long>(this->_Storage), static_cast<long long>(_Operand));
        return static_cast<_TVal>(_Result);
    }

    _TVal fetch_and(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        long long _Result;
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedAnd64, _Atomic_address_as<long long>(this->_Storage),
            static_cast<long long>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedAnd64,
            _Atomic_address_as<long long>(this->_Storage), static_cast<long long>(_Operand));
        return static_cast<_TVal>(_Result);
    }

    _TVal fetch_or(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        long long _Result;
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedOr64, _Atomic_address_as<long long>(this->_Storage),
            static_cast<long long>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedOr64,
            _Atomic_address_as<long long>(this->_Storage), static_cast<long long>(_Operand));
        return static_cast<_TVal>(_Result);
    }

    _TVal fetch_xor(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
        long long _Result;
        _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedXor64, _Atomic_address_as<long long>(this->_Storage),
            static_cast<long long>(_Operand));
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedXor64,
            _Atomic_address_as<long long>(this->_Storage), static_cast<long long>(_Operand));
        return static_cast<_TVal>(_Result);
    }

@@ -1927,11 +1923,11 @@ struct _Atomic_pointer : _Atomic_storage<_Ty> {
            static_cast<ptrdiff_t>(static_cast<size_t>(_Diff) * sizeof(remove_pointer_t<_Ty>));
        ptrdiff_t _Result;
#if defined(_M_IX86) || defined(_M_ARM)
        _ATOMIC_CHOOSE_INTRINSIC(
            _Order, _Result, _InterlockedExchangeAdd, _Atomic_address_as<long>(this->_Storage), _Shift_bytes);
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd,
            _Atomic_address_as<long>(this->_Storage), _Shift_bytes);
#else // ^^^ 32 bits / 64 bits vvv
        _ATOMIC_CHOOSE_INTRINSIC(
            _Order, _Result, _InterlockedExchangeAdd64, _Atomic_address_as<long long>(this->_Storage), _Shift_bytes);
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd64,
            _Atomic_address_as<long long>(this->_Storage), _Shift_bytes);
#endif // hardware
        return reinterpret_cast<_Ty>(_Result);
    }

@@ -2024,11 +2020,11 @@ struct _Atomic_pointer<_Ty&> : _Atomic_storage<_Ty&> {
            static_cast<ptrdiff_t>(static_cast<size_t>(_Diff) * sizeof(remove_pointer_t<_Ty>));
        ptrdiff_t _Result;
#if defined(_M_IX86) || defined(_M_ARM)
        _ATOMIC_CHOOSE_INTRINSIC(
            _Order, _Result, _InterlockedExchangeAdd, _Atomic_address_as<long>(this->_Storage), _Shift_bytes);
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd,
            _Atomic_address_as<long>(this->_Storage), _Shift_bytes);
#else // ^^^ 32 bits / 64 bits vvv
        _ATOMIC_CHOOSE_INTRINSIC(
            _Order, _Result, _InterlockedExchangeAdd64, _Atomic_address_as<long long>(this->_Storage), _Shift_bytes);
        _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd64,
            _Atomic_address_as<long long>(this->_Storage), _Shift_bytes);
#endif // hardware
        return reinterpret_cast<_Ty>(_Result);
    }

@@ -2966,6 +2962,15 @@ _STD_END
#undef _CMPXCHG_MASK_OUT_PADDING_BITS

#undef _ATOMIC_CHOOSE_INTRINSIC
#undef _ATOMIC_LOAD_VERIFY_MEMORY_ORDER
#undef _ATOMIC_STORE_PREFIX
#undef _ATOMIC_STORE_SEQ_CST_ARM
#undef _ATOMIC_STORE_SEQ_CST_X86_X64
#undef _ATOMIC_STORE_32_SEQ_CST_X86_X64
#undef _ATOMIC_STORE_SEQ_CST
#undef _ATOMIC_STORE_32_SEQ_CST
#undef _ATOMIC_STORE_64_SEQ_CST
#undef _ATOMIC_STORE_64_SEQ_CST_IX86
#undef _ATOMIC_HAS_DCAS

#undef _STD_COMPARE_EXCHANGE_128

@@ -3917,7 +3917,7 @@ public:
    }

    shared_ptr<_Ty> exchange(shared_ptr<_Ty> _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
        _Check_memory_order(_Order);
        _Check_memory_order(static_cast<unsigned int>(_Order));
        shared_ptr<_Ty> _Result;
        _Result._Rep = this->_Repptr._Lock_and_load();
        _Result._Ptr = this->_Ptr.load(memory_order_relaxed);

@@ -3945,7 +3945,7 @@ public:

    bool compare_exchange_strong(shared_ptr<_Ty>& _Expected, shared_ptr<_Ty> _Desired,
        const memory_order _Order = memory_order_seq_cst) noexcept {
        _Check_memory_order(_Order);
        _Check_memory_order(static_cast<unsigned int>(_Order));
        auto _Rep = this->_Repptr._Lock_and_load();
        if (this->_Ptr.load(memory_order_relaxed) == _Expected._Ptr && _Rep == _Expected._Rep) {
            remove_extent_t<_Ty>* const _Tmp = _Desired._Ptr;

@@ -4036,7 +4036,7 @@ public:
    }

    weak_ptr<_Ty> exchange(weak_ptr<_Ty> _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
        _Check_memory_order(_Order);
        _Check_memory_order(static_cast<unsigned int>(_Order));
        weak_ptr<_Ty> _Result;
        _Result._Rep = this->_Repptr._Lock_and_load();
        _Result._Ptr = this->_Ptr.load(memory_order_relaxed);

@@ -4064,7 +4064,7 @@ public:

    bool compare_exchange_strong(
        weak_ptr<_Ty>& _Expected, weak_ptr<_Ty> _Desired, const memory_order _Order = memory_order_seq_cst) noexcept {
        _Check_memory_order(_Order);
        _Check_memory_order(static_cast<unsigned int>(_Order));
        auto _Rep = this->_Repptr._Lock_and_load();
        if (this->_Ptr.load(memory_order_relaxed) == _Expected._Ptr && _Rep == _Expected._Rep) {
            remove_extent_t<_Ty>* const _Tmp = _Desired._Ptr;
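Usage reminder (not part of the diff): the public interface is unchanged, and explicit memory orders still reach the refactored helpers shown above, e.g.:

#include <atomic>

std::atomic<int> counter{0};

int bump() noexcept {
    // fetch_add routes through _ATOMIC_CHOOSE_INTRINSIC / the _InterlockedExchangeAdd family
    const int prev = counter.fetch_add(1, std::memory_order_relaxed);
    // atomic_thread_fence now forwards to the extern "C" ::_Atomic_thread_fence
    std::atomic_thread_fence(std::memory_order_seq_cst);
    return prev;
}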