Merge pull request #38 from Microsoft/clang-tidy

Clang tidy
This commit is contained in:
David Chisnall 2019-04-30 09:41:12 -04:00 committed by GitHub
Parents 09c884b553 8f082c6a9c
Commit 747063db87
No key matching this signature was found
GPG key ID: 4AEE18F83AFDEB23
34 changed files with 290 additions and 203 deletions

.clang-tidy Normal file
View file

@ -0,0 +1,9 @@
Checks: '-clang-analyzer-security.insecureAPI.bzero,clang-diagnostic-*,google-readability-casting,readability-else-after-return,performance-unnecessary-copy-initialization,bugprone-use-after-move,modernize-use-nullptr,modernize-redundant-void-arg,modernize-return-braced-init-list,modernize-use-default-member-init,modernize-use-equals-default,modernize-use-equals-delete,modernize-use-nodiscard,modernize-use-override,cppcoreguidelines-avoid-goto,misc-unconventional-assign-operator,cppcoreguidelines-narrowing-conversions,bugprone-assert-side-effect,bugprone-bool-pointer-implicit-conversion,bugprone-copy-constructor-init,bugprone-forward-declaration-namespace,bugprone-forwarding-reference-overload,bugprone-macro-parentheses,bugprone-macro-repeated-side-effects,bugprone-move-forwarding-reference,bugprone-misplaced-widening-cast,bugprone-swapped-arguments,bugprone-undelegated-constructor,bugprone-unused-raii,cert-dcl21-cpp,llvm-namespace-comment,misc-static-assert,misc-redundant-expression,modernize-loop-convert,modernize-use-using,performance-noexcept-move-constructor,readability-non-const-parameter'
# It would be nice to enable:
# - readability-magic-numbers
# - modernize-avoid-c-arrays
# - cppcoreguidelines-pro-bounds-array-to-pointer-decay (assert breaks it).
# - readability-braces-around-statements (mostly works, but is very confused by constexpr if).
CheckOptions:
- key: modernize-use-default-member-init.UseAssignment
value: '1'
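
For context, the `CheckOptions` entry above sets `modernize-use-default-member-init.UseAssignment`, which makes that check propose assignment-style default member initialisers rather than brace initialisation. A minimal before/after sketch (illustrative only, not code from this repository; the struct names are made up):

// Before: the default value is repeated in the constructor's init list.
struct Counter
{
  Counter() : count(0) {}
  int count;
};

// After the fix-it: with UseAssignment: '1' the check suggests assignment
// syntax for the in-class default; with the option unset it would suggest
// `int count{0};` instead.
struct CounterAfter
{
  CounterAfter() = default;
  int count = 0;
};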

View file

@ -110,4 +110,4 @@ namespace snmalloc
#endif
}
};
}
} // namespace snmalloc

src/ds/address.h Normal file
View file

@ -0,0 +1,42 @@
#pragma once
#include <cstdint>
namespace snmalloc
{
/**
* The type used for an address. Currently, all addresses are assumed to be
* provenance-carrying values and so it is possible to cast back from the
* result of arithmetic on an address_t. Eventually, this will want to be
* separated into two types, one for raw addresses and one for addresses that
* can be cast back to pointers.
*/
using address_t = uintptr_t;
/**
* Perform pointer arithmetic and return the adjusted pointer.
*/
template<typename T>
inline T* pointer_offset(T* base, size_t diff)
{
return reinterpret_cast<T*>(reinterpret_cast<char*>(base) + diff);
}
/**
* Cast from a pointer type to an address.
*/
template<typename T>
inline address_t address_cast(T* ptr)
{
return reinterpret_cast<address_t>(ptr);
}
/**
* Cast from an address back to a pointer of the specified type. All uses of
* this will eventually need auditing for CHERI compatibility.
*/
template<typename T>
inline T* pointer_cast(address_t address)
{
return reinterpret_cast<T*>(address);
}
} // namespace snmalloc
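
A short usage sketch (illustrative only, not part of the diff) of how these helpers replace the raw casts that the rest of this change removes:

#include <cstddef>
#include "address.h" // assumed include path for the header above

int main()
{
  using namespace snmalloc;
  char buffer[8192];
  char* base = buffer;
  // Advance a typed pointer by a byte offset without spelling out the
  // `reinterpret_cast<char*>(...) + diff` chain at every call site.
  char* second_page = pointer_offset(base, 4096);
  // Convert to an integer address for masking/alignment arithmetic...
  address_t a = address_cast(second_page);
  // ...and back again once the arithmetic is done. On CHERI-style targets
  // this round trip is the part that will need auditing, as the comment says.
  char* aligned = pointer_cast<char>(a & ~static_cast<address_t>(63));
  (void)aligned;
  return 0;
}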

View file

@ -40,10 +40,21 @@
# define __has_builtin(x) 0
#endif
#define UNUSED(x) ((void)x)
#define UNUSED(x) ((void)(x))
#if __has_builtin(__builtin_assume)
# define SNMALLOC_ASSUME(x) __builtin_assume(x)
#else
# define SNMALLOC_ASSUME(x) \
do \
{ \
} while (0)
#endif
// #define USE_LZCNT
#include "address.h"
#include <atomic>
#include <cassert>
#include <cstdint>
@ -74,6 +85,20 @@ namespace snmalloc
return BITS == 64;
}
/**
* Returns a value of type T that has a single bit set at position `shift`.
*
* S is a template parameter because callers use either `int` or `size_t`
* and either is valid to represent a number in the range 0-63 (or 0-127 if
* we want to use `__uint128_t` as `T`).
*/
template<typename T = size_t, typename S>
constexpr T one_at_bit(S shift)
{
static_assert(std::is_integral_v<T>, "Type must be integral");
return (static_cast<T>(1)) << shift;
}
static constexpr size_t ADDRESS_BITS = is64() ? 48 : 32;
inline void pause()
@ -164,20 +189,22 @@ namespace snmalloc
return BITS - index - 1;
# endif
#else
return (size_t)__builtin_clzl(x);
return static_cast<size_t>(__builtin_clzl(x));
#endif
}
inline constexpr size_t rotr_const(size_t x, size_t n)
{
size_t nn = n & (BITS - 1);
return (x >> nn) | (x << (((size_t) - (int)nn) & (BITS - 1)));
return (x >> nn) |
(x << ((static_cast<size_t>(-static_cast<int>(nn))) & (BITS - 1)));
}
inline constexpr size_t rotl_const(size_t x, size_t n)
{
size_t nn = n & (BITS - 1);
return (x << nn) | (x >> (((size_t) - (int)nn) & (BITS - 1)));
return (x << nn) |
(x >> ((static_cast<size_t>(-static_cast<int>(nn))) & (BITS - 1)));
}
inline size_t rotr(size_t x, size_t n)
@ -212,7 +239,7 @@ namespace snmalloc
for (int i = BITS - 1; i >= 0; i--)
{
size_t mask = (size_t)1 << i;
size_t mask = one_at_bit(i);
if ((x & mask) == mask)
return n;
@ -232,7 +259,7 @@ namespace snmalloc
return _tzcnt_u32((uint32_t)x);
# endif
#else
return (size_t)__builtin_ctzl(x);
return static_cast<size_t>(__builtin_ctzl(x));
#endif
}
@ -242,7 +269,7 @@ namespace snmalloc
for (size_t i = 0; i < BITS; i++)
{
size_t mask = (size_t)1 << i;
size_t mask = one_at_bit(i);
if ((x & mask) == mask)
return n;
@ -283,7 +310,7 @@ namespace snmalloc
if (x <= 2)
return x;
return (size_t)1 << (BITS - clz(x - 1));
return one_at_bit(BITS - clz(x - 1));
}
inline size_t next_pow2_bits(size_t x)
@ -298,7 +325,7 @@ namespace snmalloc
if (x <= 2)
return x;
return (size_t)1 << (BITS - clz_const(x - 1));
return one_at_bit(BITS - clz_const(x - 1));
}
constexpr size_t next_pow2_bits_const(size_t x)
@ -308,7 +335,7 @@ namespace snmalloc
inline static size_t hash(void* p)
{
size_t x = (size_t)p;
size_t x = static_cast<size_t>(address_cast(p));
if (is64())
{
@ -357,7 +384,8 @@ namespace snmalloc
{
assert(next_pow2(alignment) == alignment);
return (((size_t)p | size) & (alignment - 1)) == 0;
return ((static_cast<size_t>(address_cast(p)) | size) &
(alignment - 1)) == 0;
}
template<class T>
@ -369,8 +397,8 @@ namespace snmalloc
using S = std::make_signed_t<T>;
constexpr S shift = (sizeof(S) * 8) - 1;
S a = (S)(v + 1);
S b = (S)(mod - a - 1);
S a = static_cast<S>(v + 1);
S b = static_cast<S>(mod - a - 1);
return a & ~(b >> shift);
}
@ -405,8 +433,8 @@ namespace snmalloc
template<size_t MANTISSA_BITS, size_t LOW_BITS = 0>
static size_t to_exp_mant(size_t value)
{
size_t LEADING_BIT = ((size_t)1 << (MANTISSA_BITS + LOW_BITS)) >> 1;
size_t MANTISSA_MASK = ((size_t)1 << MANTISSA_BITS) - 1;
size_t LEADING_BIT = one_at_bit(MANTISSA_BITS + LOW_BITS) >> 1;
size_t MANTISSA_MASK = one_at_bit(MANTISSA_BITS) - 1;
value = value - 1;
@ -421,8 +449,8 @@ namespace snmalloc
template<size_t MANTISSA_BITS, size_t LOW_BITS = 0>
constexpr static size_t to_exp_mant_const(size_t value)
{
size_t LEADING_BIT = ((size_t)1 << (MANTISSA_BITS + LOW_BITS)) >> 1;
size_t MANTISSA_MASK = ((size_t)1 << MANTISSA_BITS) - 1;
size_t LEADING_BIT = one_at_bit(MANTISSA_BITS + LOW_BITS) >> 1;
size_t MANTISSA_MASK = one_at_bit(MANTISSA_BITS) - 1;
value = value - 1;
@ -440,18 +468,16 @@ namespace snmalloc
if (MANTISSA_BITS > 0)
{
m_e = m_e + 1;
size_t MANTISSA_MASK = ((size_t)1 << MANTISSA_BITS) - 1;
size_t MANTISSA_MASK = one_at_bit(MANTISSA_BITS) - 1;
size_t m = m_e & MANTISSA_MASK;
size_t e = m_e >> MANTISSA_BITS;
size_t b = e == 0 ? 0 : 1;
size_t shifted_e = e - b;
size_t extended_m = (m + ((size_t)b << MANTISSA_BITS));
size_t extended_m = (m + (b << MANTISSA_BITS));
return extended_m << (shifted_e + LOW_BITS);
}
else
{
return (size_t)1 << (m_e + LOW_BITS);
}
return one_at_bit(m_e + LOW_BITS);
}
/**
@ -477,5 +503,5 @@ namespace snmalloc
{
return t1 > t2 ? t1 : t2;
}
}
}
} // namespace bits
} // namespace snmalloc
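
To make the exponent/mantissa encoding above concrete, here is a standalone sketch that mirrors the decode branch shown in the last hunk; `MANTISSA_BITS = 2` and `LOW_BITS = 0` are chosen purely for illustration and are not the values snmalloc actually configures:

#include <cstdio>
#include <cstddef>

// Mirrors the decode logic from the hunk above: the low MANTISSA_BITS of the
// encoded value are the mantissa, the rest is the exponent, and a hidden
// leading bit is added back whenever the exponent is non-zero.
template<size_t MANTISSA_BITS, size_t LOW_BITS = 0>
constexpr size_t from_exp_mant_sketch(size_t m_e)
{
  if (MANTISSA_BITS > 0)
  {
    m_e = m_e + 1;
    size_t MANTISSA_MASK = (size_t(1) << MANTISSA_BITS) - 1;
    size_t m = m_e & MANTISSA_MASK;
    size_t e = m_e >> MANTISSA_BITS;
    size_t b = e == 0 ? 0 : 1;
    size_t shifted_e = e - b;
    size_t extended_m = m + (b << MANTISSA_BITS);
    return extended_m << (shifted_e + LOW_BITS);
  }
  return size_t(1) << (m_e + LOW_BITS);
}

int main()
{
  // Prints: 1 2 3 4 5 6 7 8 10 12 14 16, i.e. exact sizes up to
  // 2^(MANTISSA_BITS + 1), then buckets whose spacing doubles with every
  // further exponent step.
  for (size_t i = 0; i < 12; i++)
    std::printf("%zu ", from_exp_mant_sketch<2>(i));
  std::printf("\n");
  return 0;
}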

View file

@ -52,4 +52,4 @@ namespace snmalloc
return *this;
}
};
}
} // namespace snmalloc

View file

@ -10,7 +10,7 @@ namespace snmalloc
* Invalid pointer class. This is similar to `std::nullptr_t`, but allows
* other values.
*/
template<uintptr_t Sentinel>
template<address_t Sentinel>
struct InvalidPointer
{
/**
@ -40,10 +40,17 @@ namespace snmalloc
* systems the sentinel should be a value in unmapped memory.
*/
template<typename T>
operator T*()
operator T*() const
{
return reinterpret_cast<T*>(Sentinel);
}
/**
* Implicit conversion to an address, returns the sentinel value.
*/
operator address_t() const
{
return Sentinel;
}
};
template<class T, class Terminator = std::nullptr_t>
@ -162,4 +169,4 @@ namespace snmalloc
#endif
}
};
}
} // namespace snmalloc
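
A small illustration (not from the diff) of why the added `const` qualifiers and the new `address_t` conversion matter: the pagemap further down stores `InvalidPointer<1>` as its `LOCKED_ENTRY` sentinel, and needs it to convert cleanly to both pointers and addresses:

#include <cstdint>

using address_t = std::uintptr_t; // as defined in src/ds/address.h above

template<address_t Sentinel>
struct InvalidPointer
{
  template<typename T>
  operator T*() const
  {
    return reinterpret_cast<T*>(Sentinel);
  }
  operator address_t() const
  {
    return Sentinel;
  }
};

int main()
{
  constexpr InvalidPointer<1> locked{};
  int* as_pointer = locked;      // usable wherever a T* is expected
  address_t as_address = locked; // usable in address comparisons/arithmetic
  return (as_address == 1 && as_pointer != nullptr) ? 0 : 1;
}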

View file

@ -21,4 +21,4 @@ namespace snmalloc
lock.clear(std::memory_order_release);
}
};
}
} // namespace snmalloc

View file

@ -50,13 +50,13 @@ namespace snmalloc
public:
operator T()
{
return (T)(value & (length - 1));
return static_cast<T>(value & (length - 1));
}
T& operator=(const T v)
Mod& operator=(const T v)
{
value = v;
return value;
return *this;
}
};
@ -77,4 +77,4 @@ namespace snmalloc
return array[i & (rlength - 1)];
}
};
}
} // namespace snmalloc

View file

@ -72,4 +72,4 @@ namespace snmalloc
return top;
}
};
}
} // namespace snmalloc

View file

@ -79,4 +79,4 @@ namespace snmalloc
return nullptr;
}
};
}
} // namespace snmalloc

View file

@ -133,7 +133,7 @@ namespace snmalloc
/**
* Get the pagemap entry corresponding to a specific address.
*/
uint8_t get(uintptr_t p)
uint8_t get(address_t p)
{
return PagemapProvider::pagemap().get(p);
}
@ -143,7 +143,7 @@ namespace snmalloc
*/
uint8_t get(void* p)
{
return get((uintptr_t)p);
return get(address_cast(p));
}
/**
@ -152,14 +152,14 @@ namespace snmalloc
*/
void set_slab(Superslab* slab)
{
set(slab, (size_t)PMSuperslab);
set(slab, static_cast<size_t>(PMSuperslab));
}
/**
* Add a pagemap entry indicating that a medium slab has been allocated.
*/
void set_slab(Mediumslab* slab)
{
set(slab, (size_t)PMMediumslab);
set(slab, static_cast<size_t>(PMMediumslab));
}
/**
* Remove an entry from the pagemap corresponding to a superslab.
@ -167,7 +167,7 @@ namespace snmalloc
void clear_slab(Superslab* slab)
{
assert(get(slab) == PMSuperslab);
set(slab, (size_t)PMNotOurs);
set(slab, static_cast<size_t>(PMNotOurs));
}
/**
* Remove an entry corresponding to a medium slab.
@ -175,7 +175,7 @@ namespace snmalloc
void clear_slab(Mediumslab* slab)
{
assert(get(slab) == PMMediumslab);
set(slab, (size_t)PMNotOurs);
set(slab, static_cast<size_t>(PMNotOurs));
}
/**
* Update the pagemap to reflect a large allocation, of `size` bytes from
@ -184,17 +184,18 @@ namespace snmalloc
void set_large_size(void* p, size_t size)
{
size_t size_bits = bits::next_pow2_bits(size);
set(p, (uint8_t)size_bits);
set(p, static_cast<uint8_t>(size_bits));
// Set redirect slide
uintptr_t ss = (uintptr_t)p + SUPERSLAB_SIZE;
auto ss = address_cast(p) + SUPERSLAB_SIZE;
for (size_t i = 0; i < size_bits - SUPERSLAB_BITS; i++)
{
size_t run = 1ULL << i;
PagemapProvider::pagemap().set_range(
ss, (uint8_t)(64 + i + SUPERSLAB_BITS), run);
ss, static_cast<uint8_t>(64 + i + SUPERSLAB_BITS), run);
ss = ss + SUPERSLAB_SIZE * run;
}
PagemapProvider::pagemap().set((uintptr_t)p, (uint8_t)size_bits);
PagemapProvider::pagemap().set(
address_cast(p), static_cast<uint8_t>(size_bits));
}
/**
* Update the pagemap to remove a large allocation, of `size` bytes from
@ -202,7 +203,7 @@ namespace snmalloc
*/
void clear_large_size(void* vp, size_t size)
{
uintptr_t p = (uintptr_t)vp;
auto p = address_cast(vp);
size_t rounded_size = bits::next_pow2(size);
assert(get(p) == bits::next_pow2_bits(size));
auto count = rounded_size >> SUPERSLAB_BITS;
@ -217,7 +218,7 @@ namespace snmalloc
*/
void set(void* p, uint8_t x)
{
PagemapProvider::pagemap().set((uintptr_t)p, x);
PagemapProvider::pagemap().set(address_cast(p), x);
}
};
@ -326,15 +327,14 @@ namespace snmalloc
size_t rsize = sizeclass_to_size(sizeclass);
return small_alloc<zero_mem, allow_reserve>(sizeclass, rsize);
}
else if (sizeclass < NUM_SIZECLASSES)
if (sizeclass < NUM_SIZECLASSES)
{
size_t rsize = sizeclass_to_size(sizeclass);
return medium_alloc<zero_mem, allow_reserve>(sizeclass, rsize, size);
}
else
{
return large_alloc<zero_mem, allow_reserve>(size);
}
return large_alloc<zero_mem, allow_reserve>(size);
#endif
}
@ -427,7 +427,7 @@ namespace snmalloc
// Free memory of an unknown size. Must be called with an external
// pointer.
uint8_t size = pagemap().get((uintptr_t)p);
uint8_t size = pagemap().get(address_cast(p));
if (size == 0)
{
@ -453,7 +453,7 @@ namespace snmalloc
remote_dealloc(target, p, sizeclass);
return;
}
else if (size == PMMediumslab)
if (size == PMMediumslab)
{
Mediumslab* slab = Mediumslab::get(p);
RemoteAllocator* target = slab->get_allocator();
@ -470,7 +470,7 @@ namespace snmalloc
}
# ifndef SNMALLOC_SAFE_CLIENT
if (size > 64 || (uintptr_t)super != (uintptr_t)p)
if (size > 64 || address_cast(super) != address_cast(p))
{
error("Not deallocating start of an object");
}
@ -480,13 +480,13 @@ namespace snmalloc
}
template<Boundary location = Start>
static uintptr_t external_uintptr(void* p)
static address_t external_address(void* p)
{
#ifdef USE_MALLOC
error("Unsupported");
UNUSED(p);
#else
uint8_t size = global_pagemap.get((uintptr_t)p);
uint8_t size = global_pagemap.get(address_cast(p));
Superslab* super = Superslab::get(p);
if (size == PMSuperslab)
@ -495,21 +495,22 @@ namespace snmalloc
Metaslab& meta = super->get_meta(slab);
uint8_t sc = meta.sizeclass;
size_t slab_end = (size_t)slab + SLAB_SIZE;
size_t slab_end = static_cast<size_t>(address_cast(slab) + SLAB_SIZE);
return external_pointer<location>(p, sc, slab_end);
}
else if (size == PMMediumslab)
if (size == PMMediumslab)
{
Mediumslab* slab = Mediumslab::get(p);
uint8_t sc = slab->get_sizeclass();
size_t slab_end = (size_t)slab + SUPERSLAB_SIZE;
size_t slab_end =
static_cast<size_t>(address_cast(slab) + SUPERSLAB_SIZE);
return external_pointer<location>(p, sc, slab_end);
}
uintptr_t ss = (uintptr_t)super;
auto ss = address_cast(super);
while (size > 64)
{
@ -541,13 +542,13 @@ namespace snmalloc
template<Boundary location = Start>
static void* external_pointer(void* p)
{
return (void*)external_uintptr<location>(p);
return pointer_cast<void>(external_address<location>(p));
}
static size_t alloc_size(void* p)
{
// This must be called on an external pointer.
size_t size = global_pagemap.get((uintptr_t)p);
size_t size = global_pagemap.get(address_cast(p));
if (size == 0)
{
@ -625,7 +626,7 @@ namespace snmalloc
{
this->size += sizeclass_to_size(sizeclass);
Remote* r = (Remote*)p;
Remote* r = static_cast<Remote*>(p);
r->set_target_id(target_id);
assert(r->target_id() == target_id);
@ -762,7 +763,7 @@ namespace snmalloc
remote_alloc = r;
}
if (id() >= (alloc_id_t)-1)
if (id() >= static_cast<alloc_id_t>(-1))
error("Id should not be -1");
init_message_queue();
@ -799,7 +800,8 @@ namespace snmalloc
size_t end_point_correction = location == End ?
(end_point - 1) :
(location == OnePastEnd ? end_point : (end_point - rsize));
size_t offset_from_end = (end_point - 1) - (size_t)p;
size_t offset_from_end =
(end_point - 1) - static_cast<size_t>(address_cast(p));
size_t end_to_end = round_by_sizeclass(rsize, offset_from_end);
return end_point_correction - end_to_end;
}
@ -884,8 +886,9 @@ namespace snmalloc
if (super != nullptr)
return super;
super = (Superslab*)large_allocator.template alloc<NoZero, allow_reserve>(
0, SUPERSLAB_SIZE);
super = reinterpret_cast<Superslab*>(
large_allocator.template alloc<NoZero, allow_reserve>(
0, SUPERSLAB_SIZE));
if ((allow_reserve == NoReserve) && (super == nullptr))
return super;
@ -1071,7 +1074,7 @@ namespace snmalloc
if constexpr (decommit_strategy == DecommitSuper)
{
large_allocator.memory_provider.notify_not_using(
(void*)((size_t)super + OS_PAGE_SIZE),
pointer_offset(super, OS_PAGE_SIZE),
SUPERSLAB_SIZE - OS_PAGE_SIZE);
}
else if constexpr (decommit_strategy == DecommitSuperLazy)
@ -1118,9 +1121,9 @@ namespace snmalloc
}
else
{
slab =
(Mediumslab*)large_allocator.template alloc<NoZero, allow_reserve>(
0, SUPERSLAB_SIZE);
slab = reinterpret_cast<Mediumslab*>(
large_allocator.template alloc<NoZero, allow_reserve>(
0, SUPERSLAB_SIZE));
if ((allow_reserve == NoReserve) && (slab == nullptr))
return nullptr;
@ -1146,7 +1149,7 @@ namespace snmalloc
#ifndef SNMALLOC_SAFE_CLIENT
if (!is_multiple_of_sizeclass(
sizeclass_to_size(sizeclass),
(uintptr_t)slab + SUPERSLAB_SIZE - (uintptr_t)p))
address_cast(slab) + SUPERSLAB_SIZE - address_cast(p)))
{
error("Not deallocating start of an object");
}
@ -1164,8 +1167,7 @@ namespace snmalloc
if constexpr (decommit_strategy == DecommitSuper)
{
large_allocator.memory_provider.notify_not_using(
(void*)((size_t)slab + OS_PAGE_SIZE),
SUPERSLAB_SIZE - OS_PAGE_SIZE);
pointer_offset(slab, OS_PAGE_SIZE), SUPERSLAB_SIZE - OS_PAGE_SIZE);
}
pagemap().clear_slab(slab);
@ -1209,7 +1211,7 @@ namespace snmalloc
MEASURE_TIME(large_dealloc, 4, 16);
size_t size_bits = bits::next_pow2_bits(size);
size_t rsize = (size_t)1 << size_bits;
size_t rsize = bits::one_at_bit(size_bits);
assert(rsize >= SUPERSLAB_SIZE);
size_t large_class = size_bits - SUPERSLAB_BITS;
@ -1219,10 +1221,10 @@ namespace snmalloc
if ((decommit_strategy != DecommitNone) || (large_class > 0))
large_allocator.memory_provider.notify_not_using(
(void*)((size_t)p + OS_PAGE_SIZE), rsize - OS_PAGE_SIZE);
pointer_offset(p, OS_PAGE_SIZE), rsize - OS_PAGE_SIZE);
// Initialise in order to set the correct SlabKind.
Largeslab* slab = (Largeslab*)p;
Largeslab* slab = static_cast<Largeslab*>(p);
slab->init();
large_allocator.dealloc(slab, large_class);
}
@ -1248,4 +1250,4 @@ namespace snmalloc
return page_map;
}
};
}
} // namespace snmalloc
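
The "redirect slide" written by `set_large_size` above is easier to see with a toy model. The sketch below is illustrative only; `SUPERSLAB_BITS = 24` is an assumption made for the example, not a value taken from this diff. It replays the same loop into a plain map keyed by superslab index:

#include <cstdint>
#include <cstdio>
#include <map>

namespace
{
  // Assumed for illustration; the real constants live elsewhere in snmalloc.
  constexpr size_t SUPERSLAB_BITS = 24;
  constexpr size_t SUPERSLAB_SIZE = size_t(1) << SUPERSLAB_BITS;

  std::map<size_t, uint8_t> toy_pagemap; // superslab index -> entry value

  void set(uint64_t addr, uint8_t v)
  {
    toy_pagemap[addr >> SUPERSLAB_BITS] = v;
  }

  void set_range(uint64_t addr, uint8_t v, size_t count)
  {
    for (size_t i = 0; i < count; i++)
      set(addr + i * SUPERSLAB_SIZE, v);
  }

  // Mirrors set_large_size from the hunk above: the first superslab records
  // log2(size); each following superslab records 64 + i + SUPERSLAB_BITS,
  // where the run of entries sharing one value doubles each step.
  void toy_set_large_size(uint64_t p, size_t size_bits)
  {
    uint64_t ss = p + SUPERSLAB_SIZE;
    for (size_t i = 0; i < size_bits - SUPERSLAB_BITS; i++)
    {
      size_t run = size_t(1) << i;
      set_range(ss, static_cast<uint8_t>(64 + i + SUPERSLAB_BITS), run);
      ss = ss + SUPERSLAB_SIZE * run;
    }
    set(p, static_cast<uint8_t>(size_bits));
  }
}

int main()
{
  // A 64 MiB (2^26 byte) allocation at a superslab-aligned toy address spans
  // four superslabs; the entries print as 26, 88, 89, 89.
  toy_set_large_size(0x4000000, 26);
  for (auto [index, value] : toy_pagemap)
    std::printf("superslab %zu -> %d\n", index, value);
  return 0;
}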

View file

@ -143,4 +143,4 @@ namespace snmalloc
"SLAB_COUNT must be a power of 2");
static_assert(
SLAB_COUNT <= (UINT8_MAX + 1), "SLAB_COUNT must fit in a uint8_t");
};
} // namespace snmalloc

View file

@ -16,4 +16,4 @@ namespace snmalloc
return allocator;
}
};
}
} // namespace snmalloc

View file

@ -82,7 +82,8 @@ namespace snmalloc
if (slab_count.current != 0)
{
double occupancy = (double)count.current / (double)slab_count.current;
double occupancy = static_cast<double>(count.current) /
static_cast<double>(slab_count.current);
uint64_t duration = now - time;
if (ticks == 0)
@ -103,7 +104,7 @@ namespace snmalloc
// Keep in sync with header lower down
count.print(csv, multiplier);
slab_count.print(csv, slab_multiplier);
size_t average = (size_t)(online_average * multiplier);
size_t average = static_cast<size_t>(online_average * multiplier);
csv << average << (slab_multiplier - average) * slab_count.max
<< csv.endl;
@ -116,7 +117,7 @@ namespace snmalloc
static constexpr size_t BUCKETS = 1 << BUCKETS_BITS;
static constexpr size_t TOTAL_BUCKETS =
bits::to_exp_mant_const<BUCKETS_BITS>(
((size_t)1 << (bits::ADDRESS_BITS - 1)));
bits::one_at_bit(bits::ADDRESS_BITS - 1));
Stats sizeclass[N];
Stats large[LARGE_N];
@ -388,4 +389,4 @@ namespace snmalloc
}
#endif
};
}
} // namespace snmalloc

View file

@ -29,4 +29,4 @@ namespace snmalloc
return kind;
}
};
}
} // namespace snmalloc

View file

@ -19,7 +19,7 @@ namespace snmalloc
sizeof(AllocPool) == sizeof(Parent),
"You cannot add fields to this class.");
// This cast is safe due to the static assert.
return (AllocPool*)Parent::make(mp);
return static_cast<AllocPool*>(Parent::make(mp));
}
static AllocPool* make() noexcept
@ -176,4 +176,4 @@ namespace snmalloc
}
using Alloc = Allocator<GlobalVirtual>;
}
} // namespace snmalloc

View file

@ -56,7 +56,7 @@ namespace snmalloc
class MemoryProviderStateMixin : public PAL
{
std::atomic_flag lock = ATOMIC_FLAG_INIT;
size_t bump;
address_t bump;
size_t remaining;
void new_block()
@ -69,7 +69,7 @@ namespace snmalloc
PAL::template notify_using<NoZero>(r, OS_PAGE_SIZE);
bump = (size_t)r;
bump = address_cast(r);
remaining = size;
}
@ -98,7 +98,7 @@ namespace snmalloc
{
break;
}
size_t rsize = ((size_t)1 << SUPERSLAB_BITS) << large_class;
size_t rsize = bits::one_at_bit(SUPERSLAB_BITS) << large_class;
size_t decommit_size = rsize - OS_PAGE_SIZE;
// Grab all of the chunks of this size class.
auto* slab = large_stack[large_class].pop_all();
@ -108,7 +108,8 @@ namespace snmalloc
// the stack.
if (slab->get_kind() != Decommitted)
{
PAL::notify_not_using(((char*)slab) + OS_PAGE_SIZE, decommit_size);
PAL::notify_not_using(
pointer_offset(slab, OS_PAGE_SIZE), decommit_size);
}
// Once we've removed these from the stack, there will be no
// concurrent accesses and removal should have established a
@ -158,16 +159,16 @@ namespace snmalloc
new_block();
}
p = (void*)bump;
p = pointer_cast<void>(bump);
bump += size;
remaining -= size;
}
auto page_start = bits::align_down((size_t)p, OS_PAGE_SIZE);
auto page_end = bits::align_up((size_t)p + size, OS_PAGE_SIZE);
auto page_start = bits::align_down(address_cast(p), OS_PAGE_SIZE);
auto page_end = bits::align_up(address_cast(p) + size, OS_PAGE_SIZE);
PAL::template notify_using<NoZero>(
(void*)page_start, page_end - page_start);
pointer_cast<void>(page_start), page_end - page_start);
return new (p) T(std::forward<Args...>(args)...);
}
@ -217,17 +218,16 @@ namespace snmalloc
void* p = PAL::template reserve<committed>(&request);
*size = request;
uintptr_t p0 = (uintptr_t)p;
uintptr_t start = bits::align_up(p0, align);
auto p0 = address_cast(p);
auto start = bits::align_up(p0, align);
if (start > (uintptr_t)p0)
if (start > p0)
{
uintptr_t end = bits::align_down(p0 + request, align);
*size = end - start;
PAL::notify_not_using(p, start - p0);
PAL::notify_not_using(
reinterpret_cast<void*>(end), (p0 + request) - end);
p = reinterpret_cast<void*>(start);
PAL::notify_not_using(pointer_cast<void>(end), (p0 + request) - end);
p = pointer_cast<void>(start);
}
return p;
}
@ -294,16 +294,16 @@ namespace snmalloc
template<AllowReserve allow_reserve>
bool reserve_memory(size_t need, size_t add)
{
if (((size_t)reserved_start + need) > (size_t)reserved_end)
if ((address_cast(reserved_start) + need) > address_cast(reserved_end))
{
if constexpr (allow_reserve == YesReserve)
{
stats.segment_create();
reserved_start =
memory_provider.template reserve<false>(&add, SUPERSLAB_SIZE);
reserved_end = (void*)((size_t)reserved_start + add);
reserved_start =
(void*)bits::align_up((size_t)reserved_start, SUPERSLAB_SIZE);
reserved_end = pointer_offset(reserved_start, add);
reserved_start = pointer_cast<void>(
bits::align_up(address_cast(reserved_start), SUPERSLAB_SIZE));
if (add < need)
return false;
@ -320,7 +320,7 @@ namespace snmalloc
template<ZeroMem zero_mem = NoZero, AllowReserve allow_reserve = YesReserve>
void* alloc(size_t large_class, size_t size)
{
size_t rsize = ((size_t)1 << SUPERSLAB_BITS) << large_class;
size_t rsize = bits::one_at_bit(SUPERSLAB_BITS) << large_class;
if (size == 0)
size = rsize;
@ -340,8 +340,8 @@ namespace snmalloc
if (!reserve_memory<allow_reserve>(rsize, add))
return nullptr;
p = (void*)reserved_start;
reserved_start = (void*)((size_t)p + rsize);
p = reserved_start;
reserved_start = pointer_offset(p, rsize);
// All memory is zeroed since it comes from reserved space.
memory_provider.template notify_using<NoZero>(p, size);
@ -361,7 +361,7 @@ namespace snmalloc
// Passing zero_mem ensures the PAL provides zeroed pages if
// required.
memory_provider.template notify_using<zero_mem>(
(void*)((size_t)p + OS_PAGE_SIZE),
pointer_offset(p, OS_PAGE_SIZE),
bits::align_up(size, OS_PAGE_SIZE) - OS_PAGE_SIZE);
}
else
@ -381,7 +381,7 @@ namespace snmalloc
// Notify we are using the rest of the allocation.
// Passing zero_mem ensures the PAL provides zeroed pages if required.
memory_provider.template notify_using<zero_mem>(
(void*)((size_t)p + OS_PAGE_SIZE),
pointer_offset(p, OS_PAGE_SIZE),
bits::align_up(size, OS_PAGE_SIZE) - OS_PAGE_SIZE);
}
else
@ -409,4 +409,4 @@ namespace snmalloc
* passed as an argument.
*/
HEADER_GLOBAL GlobalVirtual default_memory_provider;
}
} // namespace snmalloc

View file

@ -41,7 +41,7 @@ namespace snmalloc
static Mediumslab* get(void* p)
{
return (Mediumslab*)((size_t)p & SUPERSLAB_MASK);
return pointer_cast<Mediumslab>(address_cast(p) & SUPERSLAB_MASK);
}
void init(RemoteAllocator* alloc, uint8_t sc, size_t rsize)
@ -57,11 +57,12 @@ namespace snmalloc
if ((kind != Medium) || (sizeclass != sc))
{
sizeclass = sc;
uint16_t ssize = (uint16_t)(rsize >> 8);
uint16_t ssize = static_cast<uint16_t>(rsize >> 8);
kind = Medium;
free = medium_slab_free(sc);
for (uint16_t i = free; i > 0; i--)
stack[free - i] = (uint16_t)((SUPERSLAB_SIZE >> 8) - (i * ssize));
stack[free - i] =
static_cast<uint16_t>((SUPERSLAB_SIZE >> 8) - (i * ssize));
}
else
{
@ -80,7 +81,7 @@ namespace snmalloc
assert(!full());
uint16_t index = stack[head++];
void* p = (void*)((size_t)this + ((size_t)index << 8));
void* p = pointer_offset(this, (static_cast<size_t>(index) << 8));
free--;
assert(bits::is_aligned_block<OS_PAGE_SIZE>(p, OS_PAGE_SIZE));
@ -124,7 +125,8 @@ namespace snmalloc
uint16_t pointer_to_index(void* p)
{
// Get the offset from the slab for a memory location.
return (uint16_t)(((size_t)p - (size_t)this) >> 8);
return static_cast<uint16_t>(
((address_cast(p) - address_cast(this))) >> 8);
}
};
}
} // namespace snmalloc

View file

@ -15,7 +15,7 @@ namespace snmalloc
Slab* get_slab()
{
return (Slab*)((size_t)this & SLAB_MASK);
return pointer_cast<Slab>(address_cast(this) & SLAB_MASK);
}
};
@ -26,7 +26,7 @@ namespace snmalloc
"Need to be able to pack a SlabLink into any free small alloc");
static constexpr uint16_t SLABLINK_INDEX =
(uint16_t)(SLAB_SIZE - sizeof(SlabLink));
static_cast<uint16_t>(SLAB_SIZE - sizeof(SlabLink));
// The Metaslab represents the status of a single slab.
// This can be either a short or a standard slab.
@ -90,22 +90,23 @@ namespace snmalloc
void set_full()
{
assert(head == 1);
head = (uint16_t)~0;
head = static_cast<uint16_t>(~0);
}
SlabLink* get_link(Slab* slab)
{
return (SlabLink*)((size_t)slab + link);
return reinterpret_cast<SlabLink*>(pointer_offset(slab, link));
}
bool valid_head(bool is_short)
{
size_t size = sizeclass_to_size(sizeclass);
size_t offset = get_slab_offset(sizeclass, is_short);
size_t all_high_bits = ~static_cast<size_t>(1);
size_t head_start =
remove_cache_friendly_offset(head & ~(size_t)1, sizeclass);
size_t slab_start = offset & ~(size_t)1;
remove_cache_friendly_offset(head & all_high_bits, sizeclass);
size_t slab_start = offset & all_high_bits;
return ((head_start - slab_start) % size) == 0;
}
@ -146,7 +147,7 @@ namespace snmalloc
if (curr == link)
break;
// Iterate bump/free list segment
curr = *(uint16_t*)((uintptr_t)slab + curr);
curr = *reinterpret_cast<uint16_t*>(pointer_offset(slab, curr));
}
// Check we terminated traversal on a correctly aligned block
@ -170,4 +171,4 @@ namespace snmalloc
#endif
}
};
}
} // namespace snmalloc

View file

@ -78,7 +78,7 @@ namespace snmalloc
(INDEX_LEVELS * BITS_PER_INDEX_LEVEL) + BITS_FOR_LEAF + GRANULARITY_BITS;
// Value used to mark that a node is currently being added
static constexpr uintptr_t LOCKED_ENTRY = 1;
static constexpr InvalidPointer<1> LOCKED_ENTRY{};
struct Leaf
{
@ -112,14 +112,14 @@ namespace snmalloc
// to see that correctly.
PagemapEntry* value = e->load(std::memory_order_relaxed);
if ((uintptr_t)value <= LOCKED_ENTRY)
if ((value == nullptr) || (value == LOCKED_ENTRY))
{
if constexpr (create_addr)
{
value = nullptr;
if (e->compare_exchange_strong(
value, (PagemapEntry*)LOCKED_ENTRY, std::memory_order_relaxed))
value, LOCKED_ENTRY, std::memory_order_relaxed))
{
auto& v = default_memory_provider;
value = v.alloc_chunk<PagemapEntry, OS_PAGE_SIZE>();
@ -127,7 +127,7 @@ namespace snmalloc
}
else
{
while ((uintptr_t)e->load(std::memory_order_relaxed) ==
while (address_cast(e->load(std::memory_order_relaxed)) ==
LOCKED_ENTRY)
{
bits::pause();
@ -178,7 +178,7 @@ namespace snmalloc
break;
}
Leaf* leaf = (Leaf*)get_node<create_addr>(e, result);
Leaf* leaf = reinterpret_cast<Leaf*>(get_node<create_addr>(e, result));
if (!result)
return std::pair(nullptr, 0);
@ -281,6 +281,7 @@ namespace snmalloc
for (; ix < last; ix++)
{
SNMALLOC_ASSUME(leaf_ix.first != nullptr);
leaf_ix.first->values[ix] = x;
}
@ -356,4 +357,4 @@ namespace snmalloc
} while (length > 0);
}
};
}
} // namespace snmalloc
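
The `LOCKED_ENTRY` changes above keep the same lazy-initialisation protocol: the first thread to CAS an empty slot to the sentinel allocates the node and publishes it, while every other thread spins until the sentinel disappears. A simplified, self-contained sketch of that pattern (acquire/release ordering is used here for clarity; the pagemap itself gets away with relaxed loads):

#include <atomic>
#include <thread>

struct Node
{
  int payload = 0;
};

// Sentinel meaning "somebody is building this node right now"; the real code
// uses InvalidPointer<1> so the sentinel converts to both Node* and address_t.
static Node* const LOCKED = reinterpret_cast<Node*>(1);

Node* get_or_create(std::atomic<Node*>& slot)
{
  Node* value = slot.load(std::memory_order_acquire);
  if ((value == nullptr) || (value == LOCKED))
  {
    value = nullptr;
    // The winner of the CAS owns initialisation; the sentinel stops a second
    // thread from allocating the node as well.
    if (slot.compare_exchange_strong(value, LOCKED, std::memory_order_acq_rel))
    {
      value = new Node{};
      slot.store(value, std::memory_order_release);
    }
    else
    {
      // Loser: wait until the winner replaces the sentinel with a real node.
      while ((value = slot.load(std::memory_order_acquire)) == LOCKED)
        std::this_thread::yield();
    }
  }
  return value;
}

int main()
{
  std::atomic<Node*> slot{nullptr};
  std::thread a([&] { get_or_create(slot); });
  std::thread b([&] { get_or_create(slot); });
  a.join();
  b.join();
  return slot.load()->payload; // exactly one Node was created (leaked here)
}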

View file

@ -76,8 +76,8 @@ namespace snmalloc
// Returns a linked list of all objects in the stack, emptying the stack.
if (p == nullptr)
return stack.pop_all();
else
return p->next;
return p->next;
}
void restore(T* first, T* last)
@ -91,8 +91,8 @@ namespace snmalloc
{
if (p == nullptr)
return list;
else
return p->list_next;
return p->list_next;
}
};
}
} // namespace snmalloc

View file

@ -18,4 +18,4 @@ namespace snmalloc
/// Used by the pool to keep the list of all entries ever created.
T* list_next;
};
}
} // namespace snmalloc

View file

@ -47,4 +47,4 @@ namespace snmalloc
reinterpret_cast<uintptr_t>(&message_queue));
}
};
}
} // namespace snmalloc

View file

@ -15,7 +15,8 @@ namespace snmalloc
// Don't use sizeclasses that are not a multiple of the alignment.
// For example, 24 byte allocations can be
// problematic for some data due to alignment issues.
return (uint8_t)bits::to_exp_mant<INTERMEDIATE_BITS, MIN_ALLOC_BITS>(size);
return static_cast<uint8_t>(
bits::to_exp_mant<INTERMEDIATE_BITS, MIN_ALLOC_BITS>(size));
}
constexpr static inline uint8_t size_to_sizeclass_const(size_t size)
@ -23,18 +24,18 @@ namespace snmalloc
// Don't use sizeclasses that are not a multiple of the alignment.
// For example, 24 byte allocations can be
// problematic for some data due to alignment issues.
return (uint8_t)bits::to_exp_mant_const<INTERMEDIATE_BITS, MIN_ALLOC_BITS>(
size);
return static_cast<uint8_t>(
bits::to_exp_mant_const<INTERMEDIATE_BITS, MIN_ALLOC_BITS>(size));
}
constexpr static inline size_t large_sizeclass_to_size(uint8_t large_class)
{
return (size_t)1 << (large_class + SUPERSLAB_BITS);
return bits::one_at_bit(large_class + SUPERSLAB_BITS);
}
// Small classes range from [MIN, SLAB], i.e. inclusive.
static constexpr size_t NUM_SMALL_CLASSES =
size_to_sizeclass_const((size_t)1 << SLAB_BITS) + 1;
size_to_sizeclass_const(bits::one_at_bit(SLAB_BITS)) + 1;
static constexpr size_t NUM_SIZECLASSES =
size_to_sizeclass_const(SUPERSLAB_SIZE);
@ -89,7 +90,7 @@ namespace snmalloc
else
// Use 32-bit division, as it is considerably faster than 64-bit and
// everything fits into 32 bits here.
return (uint32_t)(offset / rsize) * rsize;
return static_cast<uint32_t>(offset / rsize) * rsize;
}
inline static bool is_multiple_of_sizeclass(size_t rsize, size_t offset)
@ -137,7 +138,7 @@ namespace snmalloc
else
// Use 32-bit division, as it is considerably faster than 64-bit and
// everything fits into 32 bits here.
return (uint32_t)(offset % rsize) == 0;
return static_cast<uint32_t>(offset % rsize) == 0;
}
#ifdef CACHE_FRIENDLY_OFFSET
@ -166,4 +167,4 @@ namespace snmalloc
return relative;
}
#endif
};
} // namespace snmalloc

View file

@ -30,7 +30,7 @@ namespace snmalloc
bits::from_exp_mant<INTERMEDIATE_BITS, MIN_ALLOC_BITS>(sizeclass);
size_t alignment = bits::min(
(size_t)1 << bits::ctz_const(size[sizeclass]), OS_PAGE_SIZE);
bits::one_at_bit(bits::ctz_const(size[sizeclass])), OS_PAGE_SIZE);
cache_friendly_mask[sizeclass] = (alignment - 1);
inverse_cache_friendly_mask[sizeclass] = ~(alignment - 1);
}
@ -41,15 +41,15 @@ namespace snmalloc
for (uint8_t i = 0; i < NUM_SMALL_CLASSES; i++)
{
short_bump_ptr_start[i] =
(uint16_t)(1 + (short_slab_size % size[i]) + header_size);
bump_ptr_start[i] = (uint16_t)(1 + (SLAB_SIZE % size[i]));
count_per_slab[i] = (uint16_t)(SLAB_SIZE / size[i]);
static_cast<uint16_t>(1 + (short_slab_size % size[i]) + header_size);
bump_ptr_start[i] = static_cast<uint16_t>(1 + (SLAB_SIZE % size[i]));
count_per_slab[i] = static_cast<uint16_t>(SLAB_SIZE / size[i]);
}
for (uint8_t i = NUM_SMALL_CLASSES; i < NUM_SIZECLASSES; i++)
{
medium_slab_slots[i - NUM_SMALL_CLASSES] =
(uint16_t)((SUPERSLAB_SIZE - Mediumslab::header_size()) / size[i]);
medium_slab_slots[i - NUM_SMALL_CLASSES] = static_cast<uint16_t>(
(SUPERSLAB_SIZE - Mediumslab::header_size()) / size[i]);
}
}
};
@ -60,8 +60,8 @@ namespace snmalloc
{
if (is_short)
return sizeclass_metadata.short_bump_ptr_start[sc];
else
return sizeclass_metadata.bump_ptr_start[sc];
return sizeclass_metadata.bump_ptr_start[sc];
}
constexpr static inline size_t sizeclass_to_size(uint8_t sizeclass)
@ -91,4 +91,4 @@ namespace snmalloc
return sizeclass_metadata
.medium_slab_slots[(sizeclass - NUM_SMALL_CLASSES)];
}
}
} // namespace snmalloc

View file

@ -10,13 +10,13 @@ namespace snmalloc
uint16_t pointer_to_index(void* p)
{
// Get the offset from the slab for a memory location.
return (uint16_t)((size_t)p - (size_t)this);
return static_cast<uint16_t>(address_cast(p) - address_cast(this));
}
public:
static Slab* get(void* p)
{
return (Slab*)((size_t)p & SLAB_MASK);
return pointer_cast<Slab>(address_cast(p) & SLAB_MASK);
}
Metaslab& get_meta()
@ -48,10 +48,10 @@ namespace snmalloc
if ((head & 1) == 0)
{
void* node = (void*)((size_t)this + head);
void* node = pointer_offset(this, head);
// Read the next slot from the memory that's about to be allocated.
uint16_t next = *(uint16_t*)node;
uint16_t next = *static_cast<uint16_t*>(node);
meta.head = next;
p = remove_cache_friendly_offset(node, meta.sizeclass);
@ -59,8 +59,8 @@ namespace snmalloc
else
{
// This slab is being bump allocated.
p = (void*)((size_t)this + head - 1);
meta.head = (head + (uint16_t)rsize) & (SLAB_SIZE - 1);
p = pointer_offset(this, head - 1);
meta.head = (head + static_cast<uint16_t>(rsize)) & (SLAB_SIZE - 1);
if (meta.head == 1)
{
meta.set_full();
@ -89,7 +89,7 @@ namespace snmalloc
Metaslab& meta = super->get_meta(this);
return is_multiple_of_sizeclass(
sizeclass_to_size(meta.sizeclass),
(uintptr_t)this + SLAB_SIZE - (uintptr_t)p);
address_cast(this) + SLAB_SIZE - address_cast(p));
}
// Returns true if it alters get_status.
@ -123,8 +123,8 @@ namespace snmalloc
// Dealloc on the superslab.
if (is_short())
return super->dealloc_short_slab(memory_provider);
else
return super->dealloc_slab(this, memory_provider);
return super->dealloc_slab(this, memory_provider);
}
}
else if (meta.is_unused())
@ -134,8 +134,8 @@ namespace snmalloc
if (is_short())
return super->dealloc_short_slab(memory_provider);
else
return super->dealloc_slab(this, memory_provider);
return super->dealloc_slab(this, memory_provider);
}
else
{
@ -152,7 +152,7 @@ namespace snmalloc
assert(meta.valid_head(is_short()));
// Set the next pointer to the previous head.
*(uint16_t*)p = head;
*static_cast<uint16_t*>(p) = head;
meta.debug_slab_invariant(is_short(), this);
}
return Superslab::NoSlabReturn;
@ -160,7 +160,7 @@ namespace snmalloc
bool is_short()
{
return ((size_t)this & SUPERSLAB_MASK) == (size_t)this;
return (address_cast(this) & SUPERSLAB_MASK) == address_cast(this);
}
};
}
} // namespace snmalloc

View file

@ -65,4 +65,4 @@ namespace snmalloc
{
return SlowAllocator{};
}
}
} // namespace snmalloc

View file

@ -44,7 +44,7 @@ namespace snmalloc
// Use size_t, as it results in better code with MSVC
size_t slab_to_index(Slab* slab)
{
auto res = (((size_t)slab - (size_t)this) >> SLAB_BITS);
auto res = ((address_cast(slab) - address_cast(this)) >> SLAB_BITS);
assert(res == (uint8_t)res);
return res;
}
@ -67,7 +67,7 @@ namespace snmalloc
static Superslab* get(void* p)
{
return (Superslab*)((size_t)p & SUPERSLAB_MASK);
return pointer_cast<Superslab>(address_cast(p) & SUPERSLAB_MASK);
}
static bool is_short_sizeclass(uint8_t sizeclass)
@ -136,22 +136,16 @@ namespace snmalloc
{
return Available;
}
else
{
return Empty;
}
return Empty;
}
else
if (!is_full())
{
if (!is_full())
{
return OnlyShortSlabAvailable;
}
else
{
return Full;
}
return OnlyShortSlabAvailable;
}
return Full;
}
Metaslab& get_meta(Slab* slab)
@ -172,7 +166,7 @@ namespace snmalloc
if constexpr (decommit_strategy == DecommitAll)
{
memory_provider.template notify_using<NoZero>(
(void*)((size_t)this + OS_PAGE_SIZE), SLAB_SIZE - OS_PAGE_SIZE);
pointer_offset(this, OS_PAGE_SIZE), SLAB_SIZE - OS_PAGE_SIZE);
}
used++;
@ -183,7 +177,8 @@ namespace snmalloc
Slab* alloc_slab(uint8_t sizeclass, MemoryProvider& memory_provider)
{
uint8_t h = head;
Slab* slab = (Slab*)((size_t)this + ((size_t)h << SLAB_BITS));
Slab* slab = pointer_cast<Slab>(
address_cast(this) + (static_cast<size_t>(h) << SLAB_BITS));
uint8_t n = meta[h].next;
@ -207,7 +202,7 @@ namespace snmalloc
Action dealloc_slab(Slab* slab, MemoryProvider& memory_provider)
{
// This is not the short slab.
uint8_t index = (uint8_t)slab_to_index(slab);
uint8_t index = static_cast<uint8_t>(slab_to_index(slab));
uint8_t n = head - index - 1;
meta[index].sizeclass = 0;
@ -234,7 +229,7 @@ namespace snmalloc
if constexpr (decommit_strategy == DecommitAll)
{
memory_provider.notify_not_using(
(void*)((size_t)this + OS_PAGE_SIZE), SLAB_SIZE - OS_PAGE_SIZE);
pointer_offset(this, OS_PAGE_SIZE), SLAB_SIZE - OS_PAGE_SIZE);
}
bool was_full = is_full();
@ -247,4 +242,4 @@ namespace snmalloc
return NoStatusChange;
}
};
}
} // namespace snmalloc

View file

@ -157,7 +157,7 @@ namespace snmalloc
# endif
thread_alloc_release(void* p)
{
Alloc** pp = (Alloc**)p;
Alloc** pp = static_cast<Alloc**>(p);
current_alloc_pool()->release(*pp);
*pp = nullptr;
}
@ -266,4 +266,4 @@ namespace snmalloc
#else
using ThreadAlloc = ThreadAllocExplicitTLSCleanup;
#endif
}
} // namespace snmalloc

View file

@ -254,7 +254,7 @@ extern "C"
if (overflow)
{
errno = ENOMEM;
return 0;
return nullptr;
}
// Include size 0 in the first sizeclass.
sz = ((sz - 1) >> (bits::BITS - 1)) + sz;

View file

@ -5,7 +5,7 @@
namespace snmalloc
{
void error(const char* const str);
}
} // namespace snmalloc
// If simulating OE, then we need the underlying platform
#if !defined(OPEN_ENCLAVE) || defined(OPEN_ENCLAVE_SIMULATION)
@ -50,4 +50,4 @@ namespace snmalloc
{
Pal::error(str);
}
}
} // namespace snmalloc

View file

@ -42,4 +42,4 @@ namespace snmalloc
*/
YesZero
};
}
} // namespace snmalloc

View file

@ -64,7 +64,7 @@ namespace snmalloc
}
template<bool committed>
void* reserve(size_t* size, size_t align) noexcept
void* reserve(const size_t* size, size_t align) noexcept
{
size_t request = *size;
// Alignment must be a power of 2.
@ -78,7 +78,7 @@ namespace snmalloc
size_t log2align = bits::next_pow2_bits(align);
void* p = mmap(
NULL,
nullptr,
request,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_ALIGNED(log2align),
@ -91,5 +91,5 @@ namespace snmalloc
return p;
}
};
}
} // namespace snmalloc
#endif

View file

@ -29,4 +29,4 @@ namespace snmalloc
}
}
};
}
} // namespace snmalloc