Bug 1437731 - Convert a |uint16_t * uint16_t| to |1U * uint16_t * uint16_t| to avoid integer promotion of the original two operands resulting in signed integer overflow given the right runtime inputs. r=froydnj

--HG--
extra : rebase_source : acbc3eb181e8a044f0ffd8c5b0a7c96b59f57ffe
This commit is contained in:
Jeff Walden 2018-02-13 08:59:54 -08:00
Родитель 792f264ae3
Коммит abf8ff7bfe
1 изменённых файлов: 9 добавлений и 1 удалений

Просмотреть файл

@@ -755,7 +755,15 @@ struct Abs {
template<typename T>
struct Neg {
  // Unsigned counterpart of T when T is an integral type; T itself otherwise.
  using MaybeUnsignedT = typename detail::MaybeMakeUnsigned<T>::Type;

  // Return -x, computing the negation in an unsigned type so that
  // wraparound is well-defined rather than signed-overflow UB.
  static T apply(T x) {
    // Prepend |1U| to force integral promotion through *unsigned* types.
    // Otherwise when |T = uint16_t| and |int| is 32-bit, we could have
    // |uint16_t(-1) * uint16_t(65535)| which would really be
    // |int(65535) * int(65535)|, but as |4294836225 > 2147483647| would
    // perform signed integer overflow.
    // https://stackoverflow.com/questions/24795651/whats-the-best-c-way-to-multiply-unsigned-integers-modularly-safely
    return static_cast<MaybeUnsignedT>(1U * MaybeUnsignedT(-1) * MaybeUnsignedT(x));
  }
};
template<typename T>
struct Not {