Merge pull request #3 from Microsoft/ThirtyTwoBit

Windows 32bit build
Matthew Parkinson 2019-01-16 17:12:03 +00:00 committed by GitHub
Parent e2190f376f ab57c86e3a
Commit dca05e2556
No known key found for this signature
GPG key ID: 4AEE18F83AFDEB23
5 changed files: 154 additions and 39 deletions

View file

@@ -50,7 +50,7 @@ phases:
failOnStderr: true
displayName: 'LD_PRELOAD Compile'
- phase: Windows
- phase: Windows64bit
queue:
name: 'Hosted VS2017'
parallel: 2
@@ -76,6 +76,32 @@ phases:
workingDirectory: build
displayName: 'Run Ctest'
- phase: Windows32bit
queue:
name: 'Hosted VS2017'
parallel: 2
matrix:
Debug:
BuildType: Debug
Release:
BuildType: Release
steps:
- task: CMake@1
displayName: 'CMake .. -G"Visual Studio 15 2017"'
inputs:
cmakeArgs: '.. -G"Visual Studio 15 2017"'
- task: MSBuild@1
displayName: 'Build solution build/snmalloc.sln'
inputs:
solution: build/snmalloc.sln
msbuildArguments: '/m /p:Configuration=$(BuildType)'
- script: 'ctest -j 4 --interactive-debug-mode 0 --output-on-failure'
workingDirectory: build
displayName: 'Run Ctest'
- phase: Format
queue:
name: 'Hosted Ubuntu 1604'
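
Note the generator string: 'Visual Studio 15 2017' with no 'Win64' suffix produces an x86 build, which is the point of the new phase: it flips every pointer-width branch in the library the other way. A minimal sketch (an assumption about the mechanism, not snmalloc source) of the kind of compile-time test that now gets exercised in both directions:

// Sketch: the property the 32-bit phase changes. snmalloc's own check is
// bits::is64(), used later in the test diff; this stand-in just tests
// pointer width.
constexpr bool is64bit_build()
{
  return sizeof(void*) == 8; // 4 for the Win32 generator, 8 with -G"... Win64"
}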

View file

@@ -38,7 +38,17 @@ namespace snmalloc
PMMediumslab = 2
};
using SuperslabPagemap = Pagemap<SUPERSLAB_BITS, uint8_t, 0>;
#ifndef SNMALLOC_MAX_FLATPAGEMAP_SIZE
// Use the flat pagemap if it fits within a single pagemap node.
# define SNMALLOC_MAX_FLATPAGEMAP_SIZE PAGEMAP_NODE_SIZE
#endif
static constexpr bool USE_FLATPAGEMAP = SNMALLOC_MAX_FLATPAGEMAP_SIZE >=
sizeof(FlatPagemap<SUPERSLAB_BITS, uint8_t>);
using SuperslabPagemap = std::conditional_t<
USE_FLATPAGEMAP,
FlatPagemap<SUPERSLAB_BITS, uint8_t>,
Pagemap<SUPERSLAB_BITS, uint8_t, 0>>;
HEADER_GLOBAL SuperslabPagemap global_pagemap;
/**
@@ -131,7 +141,7 @@ namespace snmalloc
};
static_assert(
SUPERSLAB_SIZE == Pagemap<SUPERSLAB_BITS, size_t, 0>::GRANULARITY,
SUPERSLAB_SIZE == SuperslabPagemap::GRANULARITY,
"The superslab size should be the same as the pagemap granularity");
#ifndef SNMALLOC_DEFAULT_PAGEMAP
@@ -521,9 +531,7 @@ namespace snmalloc
this->size += sizeclass_to_size(sizeclass);
Remote* r = (Remote*)p;
r->set_sizeclass(sizeclass);
assert(r->sizeclass() == sizeclass);
r->set_target_id(target_id);
r->set_sizeclass_and_target_id(target_id, sizeclass);
assert(r->sizeclass() == sizeclass);
assert(r->target_id() == target_id);
@@ -588,10 +596,6 @@ namespace snmalloc
}
};
static_assert(
sizeof(Remote) <= MIN_ALLOC_SIZE,
"Need to be able to cast any small alloc to Remote");
SlabList small_classes[NUM_SMALL_CLASSES];
DLList<Mediumslab> medium_classes[NUM_MEDIUM_CLASSES];
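
The interesting move here is that the pagemap representation is now chosen entirely at compile time: if a flat array covering the whole address space is no bigger than one pagemap node, the multi-level map is dropped. A minimal sketch of the selection pattern, using placeholder types and assumed numbers (byte-sized entries, 24-bit granularity, a 32-bit address space) rather than the real snmalloc constants:

#include <cstddef>
#include <cstdint>
#include <type_traits>

struct FlatMap {}; // placeholder for FlatPagemap<...>
struct TreeMap {}; // placeholder for Pagemap<...>

constexpr std::size_t NODE_SIZE = std::size_t{1} << 16;         // stand-in for PAGEMAP_NODE_SIZE
constexpr std::size_t FLAT_BYTES = std::size_t{1} << (32 - 24); // one byte per 16 MiB granule

using ChosenPagemap =
  std::conditional_t<(NODE_SIZE >= FLAT_BYTES), FlatMap, TreeMap>;

static_assert(std::is_same_v<ChosenPagemap, FlatMap>,
              "a 32-bit address space makes the flat map only 256 bytes");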

View file

@@ -7,13 +7,13 @@
namespace snmalloc
{
static constexpr size_t PAGEMAP_NODE_BITS = 16;
static constexpr size_t PAGEMAP_NODE_SIZE = 1ULL << PAGEMAP_NODE_BITS;
template<size_t GRANULARITY_BITS, typename T, T default_content>
class Pagemap
{
private:
static constexpr size_t PAGEMAP_BITS = 16;
static constexpr size_t PAGEMAP_SIZE = 1 << PAGEMAP_BITS;
static constexpr size_t COVERED_BITS =
bits::ADDRESS_BITS - GRANULARITY_BITS;
static constexpr size_t POINTER_BITS =
@@ -21,11 +21,16 @@ namespace snmalloc
static constexpr size_t CONTENT_BITS =
bits::next_pow2_bits_const(sizeof(T));
static constexpr size_t BITS_FOR_LEAF = PAGEMAP_BITS - CONTENT_BITS;
static_assert(
PAGEMAP_NODE_BITS - CONTENT_BITS < COVERED_BITS,
"Should use the FlatPageMap as it does not require a tree");
static constexpr size_t BITS_FOR_LEAF = PAGEMAP_NODE_BITS - CONTENT_BITS;
static constexpr size_t ENTRIES_PER_LEAF = 1 << BITS_FOR_LEAF;
static constexpr size_t LEAF_MASK = ENTRIES_PER_LEAF - 1;
static constexpr size_t BITS_PER_INDEX_LEVEL = PAGEMAP_BITS - POINTER_BITS;
static constexpr size_t BITS_PER_INDEX_LEVEL =
PAGEMAP_NODE_BITS - POINTER_BITS;
static constexpr size_t ENTRIES_PER_INDEX_LEVEL = 1 << BITS_PER_INDEX_LEVEL;
static constexpr size_t ENTRIES_MASK = ENTRIES_PER_INDEX_LEVEL - 1;
@@ -81,7 +86,7 @@ namespace snmalloc
value, (PagemapEntry*)LOCKED_ENTRY, std::memory_order_relaxed))
{
auto& v = default_memory_provider;
value = (PagemapEntry*)v.alloc_chunk(PAGEMAP_SIZE);
value = (PagemapEntry*)v.alloc_chunk(PAGEMAP_NODE_SIZE);
e->store(value, std::memory_order_release);
}
else
@@ -155,9 +160,6 @@ namespace snmalloc
return &(leaf_ix.first->values[leaf_ix.second]);
}
public:
static constexpr size_t GRANULARITY = 1 << GRANULARITY_BITS;
/**
* Returns the index of a pagemap entry within a given page. This is used
* in code that propagates changes to the pagemap elsewhere.
@@ -186,6 +188,9 @@ namespace snmalloc
return get_addr<true>(p, success);
}
public:
static constexpr size_t GRANULARITY = 1 << GRANULARITY_BITS;
T get(void* p)
{
bool success;
@@ -224,4 +229,48 @@ namespace snmalloc
} while (length > 0);
}
};
/**
* Simple pagemap that stores a T for each 2^GRANULARITY_BITS-sized
* span of the address range.
**/
template<size_t GRANULARITY_BITS, typename T>
class FlatPagemap
{
private:
static constexpr size_t COVERED_BITS =
bits::ADDRESS_BITS - GRANULARITY_BITS;
static constexpr size_t CONTENT_BITS =
bits::next_pow2_bits_const(sizeof(T));
static constexpr size_t ENTRIES = 1ULL << (COVERED_BITS + CONTENT_BITS);
static constexpr size_t SHIFT = GRANULARITY_BITS;
public:
static constexpr size_t GRANULARITY = 1 << GRANULARITY_BITS;
private:
std::atomic<T> top[ENTRIES];
public:
T get(void* p)
{
return top[(size_t)p >> SHIFT].load(std::memory_order_relaxed);
}
void set(void* p, T x)
{
top[(size_t)p >> SHIFT].store(x, std::memory_order_relaxed);
}
void set_range(void* p, T x, size_t length)
{
size_t index = (size_t)p >> SHIFT;
do
{
top[index].store(x, std::memory_order_relaxed);
index++;
length--;
} while (length > 0);
}
};
}
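
For a feel of the new class in use, here is a toy stand-in with a deliberately shrunken 16-bit "address space" (the real map is sized from bits::ADDRESS_BITS; everything below is illustrative, not the snmalloc API):

#include <atomic>
#include <cstddef>
#include <cstdint>

template<std::size_t GRANULARITY_BITS, typename T>
struct ToyFlatPagemap
{
  static constexpr std::size_t ENTRIES =
    std::size_t{1} << (16 - GRANULARITY_BITS); // 16 assumed address bits

  std::atomic<T> top[ENTRIES]{};

  T get(std::uintptr_t p)
  {
    return top[p >> GRANULARITY_BITS].load(std::memory_order_relaxed);
  }

  void set(std::uintptr_t p, T x)
  {
    top[p >> GRANULARITY_BITS].store(x, std::memory_order_relaxed);
  }
};

int main()
{
  ToyFlatPagemap<8, std::uint8_t> pm; // one byte per 256-byte granule
  pm.set(0x1234, 1);                  // 1 = PMSuperslab in the diff's enum
  return pm.get(0x1234) == 1 ? 0 : 1;
}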

View file

@@ -9,11 +9,14 @@ namespace snmalloc
{
struct Remote
{
static const uint64_t SIZECLASS_SHIFT = 56;
static const uint64_t SIZECLASS_MASK = 0xffULL << SIZECLASS_SHIFT;
static const uint64_t TARGET_MASK = ~SIZECLASS_MASK;
static_assert(SIZECLASS_MASK == 0xff00'0000'0000'0000ULL);
static const size_t PTR_BITS = sizeof(void*) * 8;
static const size_t SIZECLASS_BITS = sizeof(uint8_t) * 8;
static const bool USE_TOP_BITS =
SIZECLASS_BITS + bits::ADDRESS_BITS <= PTR_BITS;
static const uintptr_t SIZECLASS_SHIFT = PTR_BITS - SIZECLASS_BITS;
static const uintptr_t SIZECLASS_MASK = ((1ULL << SIZECLASS_BITS) - 1)
<< SIZECLASS_SHIFT;
static const uintptr_t TARGET_MASK = ~SIZECLASS_MASK;
using alloc_id_t = size_t;
union
@@ -22,32 +25,64 @@ namespace snmalloc
Remote* non_atomic_next;
};
uint64_t value;
uintptr_t value;
// This byte does not exist for the minimum object size. It is only used if
// USE_TOP_BITS is false and the bottom bit of value is clear.
uint8_t possible_sizeclass;
void set_target_id(alloc_id_t id)
void set_sizeclass_and_target_id(alloc_id_t id, uint8_t sizeclass)
{
assert(id == (id & TARGET_MASK));
value = (id & TARGET_MASK) | (value & SIZECLASS_MASK);
}
void set_sizeclass(uint8_t sizeclass)
{
value = (value & TARGET_MASK) |
((static_cast<uint64_t>(sizeclass) << SIZECLASS_SHIFT) &
SIZECLASS_MASK);
if constexpr (USE_TOP_BITS)
{
assert(id == (id & TARGET_MASK));
value = (id & TARGET_MASK) |
((static_cast<uint64_t>(sizeclass) << SIZECLASS_SHIFT) &
SIZECLASS_MASK);
}
else
{
assert((id & 1) == 0);
if (sizeclass == 0)
{
value = id | 1;
}
else
{
value = id;
possible_sizeclass = sizeclass;
}
}
}
alloc_id_t target_id()
{
return value & TARGET_MASK;
if constexpr (USE_TOP_BITS)
{
return value & TARGET_MASK;
}
else
{
return value & ~1;
}
}
uint8_t sizeclass()
{
return (value & SIZECLASS_MASK) >> SIZECLASS_SHIFT;
if constexpr (USE_TOP_BITS)
{
return (value & SIZECLASS_MASK) >> SIZECLASS_SHIFT;
}
else
{
return ((value & 1) == 1) ? 0 : possible_sizeclass;
}
}
};
static_assert(
(offsetof(Remote, possible_sizeclass)) <= MIN_ALLOC_SIZE,
"Need to be able to cast any small alloc to Remote");
struct RemoteAllocator
{
using alloc_id_t = Remote::alloc_id_t;
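
On 64-bit targets the sizeclass rides in the top byte of value, but with 32-bit pointers an 8-bit sizeclass plus a full address no longer fit in one word. The fallback above keeps the allocator id in value, steals the (always-zero) low bit to flag sizeclass 0, and spills every other sizeclass into a trailing byte that exists on all but minimum-sized objects. A self-contained sketch of just that fallback encoding (Remote32 is a placeholder name, not a snmalloc type):

#include <cassert>
#include <cstdint>

struct Remote32
{
  uintptr_t value;            // allocator id; low bit doubles as a flag
  uint8_t possible_sizeclass; // only meaningful when the flag bit is clear

  void set(uintptr_t id, uint8_t sc)
  {
    assert((id & 1) == 0);             // ids are even, so the low bit is free
    value = (sc == 0) ? (id | 1) : id; // low bit set <=> sizeclass 0
    if (sc != 0)
      possible_sizeclass = sc;
  }

  uintptr_t target_id() { return value & ~uintptr_t{1}; }
  uint8_t sizeclass() { return (value & 1) ? uint8_t{0} : possible_sizeclass; }
};

int main()
{
  Remote32 r{};
  r.set(0x1000, 7);
  return (r.target_id() == 0x1000 && r.sizeclass() == 7) ? 0 : 1;
}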

View file

@@ -210,14 +210,15 @@ void test_external_pointer_large()
auto* alloc = ThreadAlloc::get();
constexpr size_t count_log = 5;
constexpr size_t count_log = snmalloc::bits::is64() ? 5 : 3;
constexpr size_t count = 1 << count_log;
// Pre allocate all the objects
size_t* objects[count];
for (size_t i = 0; i < count; i++)
{
size_t rand = r.next() & ((1 << 28) - 1);
size_t b = snmalloc::bits::is64() ? 28 : 26;
size_t rand = r.next() & ((1 << b) - 1);
size_t size = (1 << 24) + rand;
// store object
objects[i] = (size_t*)alloc->alloc(size);
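
A quick sanity check on the tightened constants: with count_log = 5 and 28 random bits the test could demand roughly 8.5 GiB of live allocation, which cannot fit in a 32-bit address space, while the 3/26 pairing caps it at 640 MiB. The arithmetic (derived from the constants above, ignoring the -1 on the random mask):

#include <cstdint>

// Worst case: 2^count_log objects of up to (1 << 24) + (1 << rand_bits) bytes.
constexpr std::uint64_t worst_case_bytes(unsigned count_log, unsigned rand_bits)
{
  return (std::uint64_t{1} << count_log) *
         ((std::uint64_t{1} << 24) + (std::uint64_t{1} << rand_bits));
}

static_assert(worst_case_bytes(5, 28) == std::uint64_t{8704} << 20); // ~8.5 GiB
static_assert(worst_case_bytes(3, 26) == std::uint64_t{640} << 20);  // 640 MiB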