Use casts for most common atomic cases (#227)

Resolves #85 / DevCom-706195

Casey's new atomic implementation fixed our strict aliasing violations, but the memcpy it relies on causes a code size regression for customers who don't build with `/Oi`. This change should restore code size for the most common uses of atomic, `atomic<integral>` and `atomic<pointer>`, by using plain casts instead of memcpy.
Billy O'Neal 2019-10-28 15:49:58 -07:00, committed by GitHub
Parent: 04cf94886a
Commit: 5bf80b41d1
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
1 changed file with 12 additions and 3 deletions


@@ -238,10 +238,19 @@ template <class _Integral, class _Ty>
_NODISCARD _Integral _Atomic_reinterpret_as(const _Ty& _Source) noexcept {
    // interprets _Source as the supplied integral type
    static_assert(is_integral_v<_Integral>, "Tried to reinterpret memory as non-integral");
#if _HAS_IF_CONSTEXPR
    if constexpr (is_integral_v<_Ty> && sizeof(_Integral) == sizeof(_Ty)) {
        return static_cast<_Integral>(_Source);
    } else if constexpr (is_pointer_v<_Ty> && sizeof(_Integral) == sizeof(_Ty)) {
        return reinterpret_cast<_Integral>(_Source);
    } else
#endif // _HAS_IF_CONSTEXPR
    {
        _Integral _Result{}; // zero padding bits
        _CSTD memcpy(&_Result, _STD addressof(_Source), sizeof(_Source));
        return _Result;
    }
}

// FUNCTION _Load_barrier
inline void _Load_barrier(const memory_order _Order) noexcept { // implement memory barrier for atomic load functions
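For readers skimming the diff, here is a minimal, self-contained sketch of the dispatch the change introduces (assumed names, not the shipped `<atomic>` code): a same-size integral source is converted with static_cast, a same-size pointer source with reinterpret_cast, and anything else falls back to the memcpy path that first zeroes the padding bits.

    #include <cstdint>
    #include <cstring>
    #include <memory>
    #include <type_traits>

    // Simplified stand-in for _Atomic_reinterpret_as; illustrative only.
    template <class Integral, class Ty>
    Integral atomic_reinterpret_as(const Ty& source) noexcept {
        static_assert(std::is_integral_v<Integral>, "Tried to reinterpret memory as non-integral");
        if constexpr (std::is_integral_v<Ty> && sizeof(Integral) == sizeof(Ty)) {
            return static_cast<Integral>(source); // plain integral conversion; no memcpy call emitted
        } else if constexpr (std::is_pointer_v<Ty> && sizeof(Integral) == sizeof(Ty)) {
            return reinterpret_cast<Integral>(source); // same-width pointer-to-integer cast
        } else {
            Integral result{}; // zero padding bits
            std::memcpy(&result, std::addressof(source), sizeof(source)); // generic fallback
            return result;
        }
    }

    struct Padded { // hypothetical payload with internal padding, forces the fallback
        char tag;
        int value;
    };

    int main() {
        int i = 42;
        const int as_int   = atomic_reinterpret_as<int>(i);             // static_cast path
        const auto as_ptr  = atomic_reinterpret_as<std::intptr_t>(&i);  // reinterpret_cast path
        const auto as_bits = atomic_reinterpret_as<std::uint64_t>(Padded{'x', 7}); // memcpy path
        return (as_int == 42 && as_ptr != 0 && as_bits != 0) ? 0 : 1;
    }

Under `/Oi` the compiler can expand memcpy inline, so the cast branches mainly help builds without intrinsics, where the memcpy in the fallback is an actual library call; that is the code size regression the commit message refers to.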