Mirror of https://github.com/mozilla/gecko-dev.git

Backed out 4 changesets (bug 1402284) for build bustage CLOSED TREE

Backed out changeset f894ea204bb4 (bug 1402284)
Backed out changeset a0c193d65799 (bug 1402284)
Backed out changeset 8f83dc111aed (bug 1402284)
Backed out changeset 7b7818155442 (bug 1402284)

MozReview-Commit-ID: DNthJ588QYa

Parent: f610ae3d0d
Commit: 6841cb2510
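The hunks below show the collection-style arena bookkeeping being backed out of mozjemalloc: an ArenaCollection class that keeps regular arenas and moz_arena_* private arenas in two red-black trees behind a single lock and chains iteration over both, versus the restored file-scope gArenaTree/narenas/arenas_lock/gMainArena globals. As a rough, hypothetical sketch of that two-container pattern (simplified types and standard containers instead of the in-tree RedBlackTree; the names here are invented, not Gecko code):

#include <cstdint>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

// Illustrative stand-in for arena_t; the real arena carries bins, runs and stats.
struct Arena {
  uint64_t mId = 0;
};

class ArenaCollectionSketch {
public:
  // Create an arena and register it in the matching container, roughly what
  // ArenaCollection::CreateArena(bool aIsPrivate) does under its lock.
  Arena* Create(bool aIsPrivate) {
    std::lock_guard<std::mutex> lock(mLock);
    auto arena = std::make_unique<Arena>();
    arena->mId = mLastId++;  // the backed-out code carries a TODO to use random ids
    Arena* raw = arena.get();
    (aIsPrivate ? mPrivate : mPublic).push_back(std::move(arena));
    return raw;
  }

  // Visit every arena, public ones first, then private ones. This mirrors the
  // chained Iterator that falls through from mArenas to mPrivateArenas, which
  // is what lets jemalloc_stats and the fork hooks use a single range-for loop.
  template <typename Func>
  void ForEach(Func&& aFunc) {
    std::lock_guard<std::mutex> lock(mLock);
    for (auto& a : mPublic) aFunc(*a);
    for (auto& a : mPrivate) aFunc(*a);
  }

private:
  std::mutex mLock;
  uint64_t mLastId = 0;
  std::vector<std::unique_ptr<Arena>> mPublic;
  std::vector<std::unique_ptr<Arena>> mPrivate;
};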
@@ -108,7 +108,6 @@
#include "mozmemory_wrap.h"
#include "mozjemalloc.h"
#include "mozilla/Atomics.h"
#include "mozilla/Alignment.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/DoublyLinkedList.h"
#include "mozilla/GuardObjects.h"
@@ -117,7 +116,6 @@
#include "mozilla/Sprintf.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Unused.h"
#include "mozilla/fallible.h"
#include "Utils.h"

// On Linux, we use madvise(MADV_DONTNEED) to release memory back to the
@@ -302,8 +300,6 @@ _mmap(void* addr, size_t length, int prot, int flags, int fd, off_t offset)
// Maximum number of dirty pages per arena.
#define DIRTY_MAX_DEFAULT (1U << 8)

static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;

// Maximum size of L1 cache line. This is used to avoid cache line aliasing,
// so over-estimates are okay (up to a point), but under-estimates will
// negatively affect performance.
@@ -452,9 +448,6 @@ static Atomic<size_t, ReleaseAcquire> gRecycledSize;
#error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
#endif

static void*
base_alloc(size_t aSize);

// Mutexes based on spinlocks. We can't use normal pthread spinlocks in all
// places, because they require malloc()ed memory, which causes bootstrapping
// issues in some cases.
@@ -957,7 +950,9 @@ public:
// --------+------+
arena_bin_t mBins[1]; // Dynamically sized.

arena_t();
bool Init();

static inline arena_t* GetById(arena_id_t aArenaId);

private:
void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
@@ -1021,21 +1016,6 @@ public:
void Purge(bool aAll);

void HardPurge();

void* operator new(size_t aCount) = delete;

void* operator new(size_t aCount, const fallible_t&)
#if !defined(_MSC_VER) || defined(_CPPUNWIND)
noexcept
#endif
{
MOZ_ASSERT(aCount == sizeof(arena_t));
// Allocate enough space for trailing bins.
return base_alloc(aCount +
(sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
}

void operator delete(void*) = delete;
};

struct ArenaTreeTrait
@@ -1053,89 +1033,6 @@ struct ArenaTreeTrait
}
};

// Bookkeeping for all the arenas used by the allocator.
// Arenas are separated in two categories:
// - "private" arenas, used through the moz_arena_* API
// - all the other arenas: the default arena, and thread-local arenas,
// used by the standard API.
class ArenaCollection
{
public:
bool Init()
{
mArenas.Init();
mPrivateArenas.Init();
mDefaultArena = mLock.Init() ? CreateArena(/* IsPrivate = */ false) : nullptr;
if (mDefaultArena) {
// arena_t constructor sets this to a lower value for thread local
// arenas; Reset to the default value for the main arena.
mDefaultArena->mMaxDirty = opt_dirty_max;
}
return bool(mDefaultArena);
}

inline arena_t* GetById(arena_id_t aArenaId, bool aIsPrivate);

arena_t* CreateArena(bool aIsPrivate);

void DisposeArena(arena_t* aArena)
{
MutexAutoLock lock(mLock);
(mPrivateArenas.Search(aArena) ? mPrivateArenas : mArenas).Remove(aArena);
// The arena is leaked, and remaining allocations in it still are alive
// until they are freed. After that, the arena will be empty but still
// taking have at least a chunk taking address space. TODO: bug 1364359.
}

using Tree = RedBlackTree<arena_t, ArenaTreeTrait>;

struct Iterator : Tree::Iterator
{
explicit Iterator(Tree* aTree, Tree* aSecondTree)
: Tree::Iterator(aTree)
, mNextTree(aSecondTree)
{
}

Item<Iterator> begin()
{
return Item<Iterator>(this, *Tree::Iterator::begin());
}

Item<Iterator> end()
{
return Item<Iterator>(this, nullptr);
}

Tree::TreeNode* Next()
{
Tree::TreeNode* result = Tree::Iterator::Next();
if (!result && mNextTree) {
new (this) Iterator(mNextTree, nullptr);
result = reinterpret_cast<Tree::TreeNode*>(*Tree::Iterator::begin());
}
return result;
}

private:
Tree* mNextTree;
};

Iterator iter() { return Iterator(&mArenas, &mPrivateArenas); }

inline arena_t* GetDefault() { return mDefaultArena; }

Mutex mLock;

private:
arena_t* mDefaultArena;
arena_id_t mLastArenaId;
Tree mArenas;
Tree mPrivateArenas;
};

static ArenaCollection gArenas;

// ******
// Chunks.
static AddressRadixTree<(sizeof(void*) << 3) - CHUNK_2POW_DEFAULT> gChunkRTree;
@@ -1179,6 +1076,13 @@ static size_t base_committed;
// ******
// Arenas.

// A tree of all available arenas, arranged by id.
// TODO: Move into arena_t as a static member when rb_tree doesn't depend on
// the type being defined anymore.
static RedBlackTree<arena_t, ArenaTreeTrait> gArenaTree;
static unsigned narenas;
static Mutex arenas_lock; // Protects arenas initialization.

// The arena associated with the current thread (per jemalloc_thread_local_arena)
// On OSX, __thread/thread_local circles back calling malloc to allocate storage
// on first access on each thread, which leads to an infinite loop, but
@@ -1190,6 +1094,10 @@ static detail::ThreadLocal<arena_t*, detail::ThreadLocalKeyStorage>
thread_arena;
#endif

// The main arena, which all threads default to until jemalloc_thread_local_arena
// is called.
static arena_t* gMainArena;

// *****************************
// Runtime configuration options.

@@ -1204,6 +1112,8 @@ static const bool opt_junk = false;
static const bool opt_zero = false;
#endif

static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;

// ***************************************************************************
// Begin forward declarations.

@@ -1216,6 +1126,8 @@ static void
chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType);
static void
chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed);
static arena_t*
arenas_extend();
static void*
huge_malloc(size_t size, bool zero);
static void*
@@ -2215,9 +2127,9 @@ thread_local_arena(bool enabled)
// called with `false`, but it doesn't matter at the moment.
// because in practice nothing actually calls this function
// with `false`, except maybe at shutdown.
arena = gArenas.CreateArena(/* IsPrivate = */ false);
arena = arenas_extend();
} else {
arena = gArenas.GetDefault();
arena = gMainArena;
}
thread_arena.set(arena);
return arena;
@@ -3808,13 +3720,17 @@ iralloc(void* aPtr, size_t aSize, arena_t* aArena)
: huge_ralloc(aPtr, aSize, oldsize);
}

arena_t::arena_t()
// Returns whether initialization succeeded.
bool
arena_t::Init()
{
unsigned i;
arena_bin_t* bin;
size_t prev_run_size;

MOZ_RELEASE_ASSERT(mLock.Init());
if (!mLock.Init()) {
return false;
}

memset(&mLink, 0, sizeof(mLink));
memset(&mStats, 0, sizeof(arena_stats_t));
@@ -3878,29 +3794,43 @@ arena_t::arena_t()
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
mMagic = ARENA_MAGIC;
#endif

return true;
}

arena_t*
ArenaCollection::CreateArena(bool aIsPrivate)
static inline arena_t*
arenas_fallback()
{
arena_t* ret = new (fallible_t()) arena_t();
if (!ret) {
// Only reached if there is an OOM error.
// Only reached if there is an OOM error.

// OOM here is quite inconvenient to propagate, since dealing with it
// would require a check for failure in the fast path. Instead, punt
// by using the first arena.
// In practice, this is an extremely unlikely failure.
_malloc_message(_getprogname(), ": (malloc) Error initializing arena\n");
// OOM here is quite inconvenient to propagate, since dealing with it
// would require a check for failure in the fast path. Instead, punt
// by using the first arena.
// In practice, this is an extremely unlikely failure.
_malloc_message(_getprogname(), ": (malloc) Error initializing arena\n");

return mDefaultArena;
return gMainArena;
}

// Create a new arena and return it.
static arena_t*
arenas_extend()
{
arena_t* ret;

// Allocate enough space for trailing bins.
ret = (arena_t*)base_alloc(
sizeof(arena_t) + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
if (!ret || !ret->Init()) {
return arenas_fallback();
}

MutexAutoLock lock(mLock);
MutexAutoLock lock(arenas_lock);

// TODO: Use random Ids.
ret->mId = mLastArenaId++;
(aIsPrivate ? mPrivateArenas : mArenas).Insert(ret);
ret->mId = narenas++;
gArenaTree.Insert(ret);

return ret;
}

@@ -4280,13 +4210,21 @@ static
base_nodes = nullptr;
base_mtx.Init();

// Initialize arenas collection here.
if (!gArenas.Init()) {
arenas_lock.Init();

// Initialize one arena here.
gArenaTree.Init();
arenas_extend();
gMainArena = gArenaTree.First();
if (!gMainArena) {
return false;
}
// arena_t::Init() sets this to a lower value for thread local arenas;
// reset to the default value for the main arena.
gMainArena->mMaxDirty = opt_dirty_max;

// Assign the default arena to the initial thread.
thread_arena.set(gArenas.GetDefault());
// Assign the initial arena to the initial thread.
thread_arena.set(gMainArena);

if (!gChunkRTree.Init()) {
return false;
@@ -4584,6 +4522,7 @@ MozJemalloc::jemalloc_stats(jemalloc_stats_t* aStats)
// Gather runtime settings.
aStats->opt_junk = opt_junk;
aStats->opt_zero = opt_zero;
aStats->narenas = narenas;
aStats->quantum = quantum;
aStats->small_max = small_max;
aStats->large_max = arena_maxclass;
@@ -4592,7 +4531,6 @@ MozJemalloc::jemalloc_stats(jemalloc_stats_t* aStats)
aStats->dirty_max = opt_dirty_max;

// Gather current memory usage statistics.
aStats->narenas = 0;
aStats->mapped = 0;
aStats->allocated = 0;
aStats->waste = 0;
@@ -4618,13 +4556,17 @@ MozJemalloc::jemalloc_stats(jemalloc_stats_t* aStats)
MOZ_ASSERT(base_mapped >= base_committed);
}

gArenas.mLock.Lock();
arenas_lock.Lock();
// Iterate over arenas.
for (auto arena : gArenas.iter()) {
for (auto arena : gArenaTree.iter()) {
size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
arena_unused, arena_headers;
arena_run_t* run;

if (!arena) {
continue;
}

arena_headers = 0;
arena_unused = 0;

@@ -4671,9 +4613,8 @@ MozJemalloc::jemalloc_stats(jemalloc_stats_t* aStats)
arena_unused - arena_headers;
aStats->bin_unused += arena_unused;
aStats->bookkeeping += arena_headers;
aStats->narenas++;
}
gArenas.mLock.Unlock();
arenas_lock.Unlock();

// Account for arena chunk headers in bookkeeping rather than waste.
chunk_header_size =
@@ -4737,8 +4678,8 @@ inline void
MozJemalloc::jemalloc_purge_freed_pages()
{
if (malloc_initialized) {
MutexAutoLock lock(gArenas.mLock);
for (auto arena : gArenas.iter()) {
MutexAutoLock lock(arenas_lock);
for (auto arena : gArenaTree.iter()) {
arena->HardPurge();
}
}
@@ -4760,8 +4701,8 @@ inline void
MozJemalloc::jemalloc_free_dirty_pages(void)
{
if (malloc_initialized) {
MutexAutoLock lock(gArenas.mLock);
for (auto arena : gArenas.iter()) {
MutexAutoLock lock(arenas_lock);
for (auto arena : gArenaTree.iter()) {
MutexAutoLock arena_lock(arena->mLock);
arena->Purge(true);
}
@@ -4769,17 +4710,15 @@ MozJemalloc::jemalloc_free_dirty_pages(void)
}

inline arena_t*
ArenaCollection::GetById(arena_id_t aArenaId, bool aIsPrivate)
arena_t::GetById(arena_id_t aArenaId)
{
if (!malloc_initialized) {
return nullptr;
}
// Use AlignedStorage2 to avoid running the arena_t constructor, while
// we only need it as a placeholder for mId.
mozilla::AlignedStorage2<arena_t> key;
key.addr()->mId = aArenaId;
MutexAutoLock lock(mLock);
arena_t* result = (aIsPrivate ? mPrivateArenas : mArenas).Search(key.addr());
arena_t key;
key.mId = aArenaId;
MutexAutoLock lock(arenas_lock);
arena_t* result = gArenaTree.Search(&key);
MOZ_RELEASE_ASSERT(result);
return result;
}
@@ -4790,7 +4729,7 @@ inline arena_id_t
MozJemalloc::moz_create_arena()
{
if (malloc_init()) {
arena_t* arena = gArenas.CreateArena(/* IsPrivate = */ true);
arena_t* arena = arenas_extend();
return arena->mId;
}
return 0;
@@ -4800,9 +4739,13 @@ template<>
inline void
MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
{
arena_t* arena = gArenas.GetById(aArenaId, /* IsPrivate = */ true);
arena_t* arena = arena_t::GetById(aArenaId);
if (arena) {
gArenas.DisposeArena(arena);
MutexAutoLock lock(arenas_lock);
gArenaTree.Remove(arena);
// The arena is leaked, and remaining allocations in it still are alive
// until they are freed. After that, the arena will be empty but still
// taking have at least a chunk taking address space. TODO: bug 1364359.
}
}

@@ -4811,7 +4754,7 @@ MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
inline return_type MozJemalloc::moz_arena_##name( \
arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
{ \
BaseAllocator allocator(gArenas.GetById(aArenaId, /* IsPrivate = */ true));\
BaseAllocator allocator(arena_t::GetById(aArenaId)); \
return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
@@ -4844,9 +4787,9 @@ static
_malloc_prefork(void)
{
// Acquire all mutexes in a safe order.
gArenas.mLock.Lock();
arenas_lock.Lock();

for (auto arena : gArenas.iter()) {
for (auto arena : gArenaTree.iter()) {
arena->mLock.Lock();
}

@@ -4866,11 +4809,10 @@ static

base_mtx.Unlock();

for (auto arena : gArenas.iter()) {
for (auto arena : gArenaTree.iter()) {
arena->mLock.Unlock();
}

gArenas.mLock.Unlock();
arenas_lock.Unlock();
}

#ifndef XP_DARWIN
@@ -4884,11 +4826,10 @@ static

base_mtx.Init();

for (auto arena : gArenas.iter()) {
for (auto arena : gArenaTree.iter()) {
arena->mLock.Init();
}

gArenas.mLock.Init();
arenas_lock.Init();
}

// End library-private functions.
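The remaining hunks are against the red-black tree header (the RB_H_ guard). There the backout replaces the mozilla::AlignedStorage2<TreeNode> pseudo-root that Insert() and Remove() used as a constructor-free placeholder with a plain TreeNode local, so the T base-class constructor runs again for that throwaway node. A rough sketch of the storage trick being removed, with invented types rather than the Gecko ones:

#include <cstddef>

// Invented example: a node whose base class has a constructor we would rather
// not run for a throwaway placeholder (arena_t is the real-world case here).
struct HeavyBase {
  HeavyBase() { /* imagine expensive, allocation-dependent setup */ }
};

struct Node : HeavyBase {
  Node* mLeft = nullptr;
  void SetLeft(Node* aNode) { mLeft = aNode; }
  Node* Left() const { return mLeft; }
};

// Roughly what mozilla::AlignedStorage2<T> provides: suitably sized and aligned
// raw bytes plus an addr() accessor, with no constructor or destructor run.
template <typename T>
struct RawStorage {
  alignas(T) unsigned char mBytes[sizeof(T)];
  T* addr() { return reinterpret_cast<T*>(mBytes); }
};

Node* PseudoRootDemo(Node* aRoot) {
  // The backed-out rb.h code treated such storage as a pseudo-root: it sets
  // only the link fields the rebalancing loop needs and never constructs the
  // object. That leans on the node being a plain bundle of pointers; formally
  // no object lifetime has started, which is why the source carries a comment
  // explaining the placeholder.
  RawStorage<Node> pseudo;
  pseudo.addr()->SetLeft(aRoot);
  return pseudo.addr()->Left();
}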
@@ -67,7 +67,6 @@
#ifndef RB_H_
#define RB_H_

#include "mozilla/Alignment.h"
#include "Utils.h"

enum NodeColor
@@ -178,6 +177,7 @@ public:
return Remove(reinterpret_cast<TreeNode*>(aNode));
}

private:
/* Helper class to avoid having all the tree traversal code further below
 * have to use Trait::GetTreeNode, adding visual noise. */
struct TreeNode : public T
@@ -223,7 +223,6 @@ public:
}
};

private:
TreeNode* mRoot;

TreeNode* First(TreeNode* aStart)
@@ -328,16 +327,14 @@ private:

void Insert(TreeNode* aNode)
{
// rbp_i_s is only used as a placeholder for its RedBlackTreeNode. Use
// AlignedStorage2 to avoid running the TreeNode base class constructor.
mozilla::AlignedStorage2<TreeNode> rbp_i_s;
TreeNode rbp_i_s;
TreeNode *rbp_i_g, *rbp_i_p, *rbp_i_c, *rbp_i_t, *rbp_i_u;
int rbp_i_cmp = 0;
rbp_i_g = nullptr;
rbp_i_p = rbp_i_s.addr();
rbp_i_p->SetLeft(mRoot);
rbp_i_p->SetRight(nullptr);
rbp_i_p->SetColor(NodeColor::Black);
rbp_i_s.SetLeft(mRoot);
rbp_i_s.SetRight(nullptr);
rbp_i_s.SetColor(NodeColor::Black);
rbp_i_p = &rbp_i_s;
rbp_i_c = mRoot;
/* Iteratively search down the tree for the insertion point,
 * splitting 4-nodes as they are encountered. At the end of each
@@ -408,21 +405,19 @@ private:
rbp_i_p->SetLeft(aNode);
}
/* Update the root and make sure that it is black. */
mRoot = rbp_i_s.addr()->Left();
mRoot = rbp_i_s.Left();
mRoot->SetColor(NodeColor::Black);
}

void Remove(TreeNode* aNode)
{
// rbp_r_s is only used as a placeholder for its RedBlackTreeNode. Use
// AlignedStorage2 to avoid running the TreeNode base class constructor.
mozilla::AlignedStorage2<TreeNode> rbp_r_s;
TreeNode rbp_r_s;
TreeNode *rbp_r_p, *rbp_r_c, *rbp_r_xp, *rbp_r_t, *rbp_r_u;
int rbp_r_cmp;
rbp_r_p = rbp_r_s.addr();
rbp_r_p->SetLeft(mRoot);
rbp_r_p->SetRight(nullptr);
rbp_r_p->SetColor(NodeColor::Black);
rbp_r_s.SetLeft(mRoot);
rbp_r_s.SetRight(nullptr);
rbp_r_s.SetColor(NodeColor::Black);
rbp_r_p = &rbp_r_s;
rbp_r_c = mRoot;
rbp_r_xp = nullptr;
/* Iterate down the tree, but always transform 2-nodes to 3- or
@@ -582,7 +577,7 @@ private:
}
}
/* Update root. */
mRoot = rbp_r_s.addr()->Left();
mRoot = rbp_r_s.Left();
}

TreeNode* RotateLeft(TreeNode* aNode)
@@ -733,7 +728,6 @@ public:
}
}

template<typename Iterator>
class Item
{
Iterator* mIterator;
@@ -759,14 +753,14 @@ public:
}
};

Item<Iterator> begin()
Item begin()
{
return Item<Iterator>(this, mDepth > 0 ? mPath[mDepth - 1] : nullptr);
return Item(this, mDepth > 0 ? mPath[mDepth - 1] : nullptr);
}

Item<Iterator> end()
Item end()
{
return Item<Iterator>(this, nullptr);
return Item(this, nullptr);
}

TreeNode* Next()