Bug 956501 - Update mozjemalloc chunk_alloc_mmap functions to latest upstream version r=glandium

Jon Coppeard 2014-01-14 10:06:25 +00:00
Parent 7bb587e775
Commit 8e8c44704e
1 changed file with 62 additions and 114 deletions


@@ -2698,137 +2698,85 @@ RETURN:
 #else	/* ! (defined(MOZ_MEMORY_WINDOWS) || defined(JEMALLOC_USES_MAP_ALIGN) || defined(MALLOC_PAGEFILE)) */
 
-/*
- * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
- * potentially avoid some system calls.
- */
-#ifndef NO_TLS
-static __thread bool	mmap_unaligned_tls __attribute__((tls_model("initial-exec")));
-#define	MMAP_UNALIGNED_GET()	mmap_unaligned_tls
-#define	MMAP_UNALIGNED_SET(v)	do {		\
-	mmap_unaligned_tls = (v);		\
-} while (0)
-#else
-#define NEEDS_PTHREAD_MMAP_UNALIGNED_TSD
-static pthread_key_t	mmap_unaligned_tsd;
-#define	MMAP_UNALIGNED_GET()	((bool)pthread_getspecific(mmap_unaligned_tsd))
-#define	MMAP_UNALIGNED_SET(v)	do {		\
-	pthread_setspecific(mmap_unaligned_tsd, (void *)(v));	\
-} while (0)
-#endif
-
-/* chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked from upstream
- * jemalloc 2.2.3 to fix Mozilla bug 694896, enable jemalloc on Mac 10.7. */
+/* pages_trim, chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked
+ * from upstream jemalloc 3.4.1 to fix Mozilla bug 956501. */
+
+/* Return the offset between a and the nearest aligned address at or below a. */
+#define	ALIGNMENT_ADDR2OFFSET(a, alignment)	\
+	((size_t)((uintptr_t)(a) & (alignment - 1)))
+
+/* Return the smallest alignment multiple that is >= s. */
+#define	ALIGNMENT_CEILING(s, alignment)	\
+	(((s) + (alignment - 1)) & (-(alignment)))
 
 static void *
-chunk_alloc_mmap_slow(size_t size, bool unaligned)
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
 {
-	void *ret;
-	size_t offset;
+	void *ret = (void *)((uintptr_t)addr + leadsize);
 
-	/* Beware size_t wrap-around. */
-	if (size + chunksize <= size)
-		return (NULL);
+	assert(alloc_size >= leadsize + size);
+	size_t trailsize = alloc_size - leadsize - size;
 
-	ret = pages_map(NULL, size + chunksize, -1);
-	if (ret == NULL)
-		return (NULL);
+	if (leadsize != 0)
+		pages_unmap(addr, leadsize);
+	if (trailsize != 0)
+		pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+	return (ret);
+}
 
-	/* Clean up unneeded leading/trailing space. */
-	offset = CHUNK_ADDR2OFFSET(ret);
-	if (offset != 0) {
-		/* Note that mmap() returned an unaligned mapping. */
-		unaligned = true;
+static void *
+chunk_alloc_mmap_slow(size_t size, size_t alignment)
+{
+	void *ret, *pages;
+	size_t alloc_size, leadsize;
 
-		/* Leading space. */
-		pages_unmap(ret, chunksize - offset);
+	alloc_size = size + alignment - pagesize;
+	/* Beware size_t wrap-around. */
+	if (alloc_size < size)
+		return (NULL);
+	do {
+		pages = pages_map(NULL, alloc_size, -1);
+		if (pages == NULL)
+			return (NULL);
+		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
+			(uintptr_t)pages;
+		ret = pages_trim(pages, alloc_size, leadsize, size);
+	} while (ret == NULL);
 
-		ret = (void *)((uintptr_t)ret +
-		    (chunksize - offset));
-
-		/* Trailing space. */
-		pages_unmap((void *)((uintptr_t)ret + size),
-		    offset);
-	} else {
-		/* Trailing space only. */
-		pages_unmap((void *)((uintptr_t)ret + size),
-		    chunksize);
-	}
-
-	/*
-	 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
-	 * the next chunk_alloc_mmap() execution tries the fast allocation
-	 * method.
-	 */
-	if (unaligned == false)
-		MMAP_UNALIGNED_SET(false);
-
+	assert(ret != NULL);
 	return (ret);
 }
 
 static void *
 chunk_alloc_mmap(size_t size, bool pagefile)
 {
 	void *ret;
+	size_t offset;
 
 	/*
 	 * Ideally, there would be a way to specify alignment to mmap() (like
 	 * NetBSD has), but in the absence of such a feature, we have to work
 	 * hard to efficiently create aligned mappings. The reliable, but
 	 * slow method is to create a mapping that is over-sized, then trim the
-	 * excess. However, that always results in at least one call to
+	 * excess. However, that always results in one or two calls to
 	 * pages_unmap().
 	 *
-	 * A more optimistic approach is to try mapping precisely the right
-	 * amount, then try to append another mapping if alignment is off. In
-	 * practice, this works out well as long as the application is not
-	 * interleaving mappings via direct mmap() calls. If we do run into a
-	 * situation where there is an interleaved mapping and we are unable to
-	 * extend an unaligned mapping, our best option is to switch to the
-	 * slow method until mmap() returns another aligned mapping. This will
-	 * tend to leave a gap in the memory map that is too small to cause
-	 * later problems for the optimistic method.
-	 *
-	 * Another possible confounding factor is address space layout
-	 * randomization (ASLR), which causes mmap(2) to disregard the
-	 * requested address. mmap_unaligned tracks whether the previous
-	 * chunk_alloc_mmap() execution received any unaligned or relocated
-	 * mappings, and if so, the current execution will immediately fall
-	 * back to the slow method. However, we keep track of whether the fast
-	 * method would have succeeded, and if so, we make a note to try the
-	 * fast method next time.
+	 * Optimistically try mapping precisely the right amount before falling
+	 * back to the slow method, with the expectation that the optimistic
+	 * approach works most of the time.
 	 */
 
-	if (MMAP_UNALIGNED_GET() == false) {
-		size_t offset;
-
-		ret = pages_map(NULL, size, -1);
-		if (ret == NULL)
-			return (NULL);
-
-		offset = CHUNK_ADDR2OFFSET(ret);
-		if (offset != 0) {
-			MMAP_UNALIGNED_SET(true);
-			/* Try to extend chunk boundary. */
-			if (pages_map((void *)((uintptr_t)ret + size),
-			    chunksize - offset, -1) == NULL) {
-				/*
-				 * Extension failed. Clean up, then revert to
-				 * the reliable-but-expensive method.
-				 */
-				pages_unmap(ret, size);
-				ret = chunk_alloc_mmap_slow(size, true);
-			} else {
-				/* Clean up unneeded leading space. */
-				pages_unmap(ret, chunksize - offset);
-				ret = (void *)((uintptr_t)ret + (chunksize -
-				    offset));
-			}
-		}
-	} else
-		ret = chunk_alloc_mmap_slow(size, false);
+	ret = pages_map(NULL, size, -1);
+	if (ret == NULL)
+		return (NULL);
+	offset = ALIGNMENT_ADDR2OFFSET(ret, chunksize);
+	if (offset != 0) {
+		pages_unmap(ret, size);
+		return (chunk_alloc_mmap_slow(size, chunksize));
+	}
 
+	assert(ret != NULL);
 	return (ret);
 }
 #endif	/* defined(MOZ_MEMORY_WINDOWS) || defined(JEMALLOC_USES_MAP_ALIGN) || defined(MALLOC_PAGEFILE) */
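
Aside (not part of the commit): the two new macros rely on alignment being a
power of two. For such alignments, a & (alignment - 1) extracts the low bits,
i.e. the offset within an alignment-sized block, and -(alignment) equals the
complementary mask ~(alignment - 1) that clears those bits. A minimal
standalone check, with an illustrative 1 MiB alignment and made-up addresses:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Copies of the macros added above, for standalone testing. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)	\
	((size_t)((uintptr_t)(a) & (alignment - 1)))
#define	ALIGNMENT_CEILING(s, alignment)	\
	(((s) + (alignment - 1)) & (-(alignment)))

int
main(void)
{
	size_t alignment = 1 << 20;	/* 1 MiB, illustrative */
	uintptr_t a = 0x40000300;	/* 0x300 past a 1 MiB boundary */

	/* Offset within the enclosing aligned block. */
	assert(ALIGNMENT_ADDR2OFFSET(a, alignment) == 0x300);
	/* Rounding up lands on the next boundary... */
	assert(ALIGNMENT_CEILING(a, alignment) == 0x40100000);
	/* ...and aligned values are fixed points of both operations. */
	assert(ALIGNMENT_ADDR2OFFSET((uintptr_t)0x40100000, alignment) == 0);
	assert(ALIGNMENT_CEILING((uintptr_t)0x40100000, alignment) ==
	    0x40100000);

	puts("ok");
	return (0);
}

This is why chunk_alloc_mmap_slow() can compute leadsize with a single AND
and a subtraction rather than a division.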
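Aside (not part of the commit): the overall strategy of the rewritten
functions, sketched with raw POSIX mmap(2)/munmap(2). The names aligned_map
and aligned_map_slow are mine, MAP_ANON is assumed available, alignment must
be a power of two no smaller than the page size, and the retry loop around
pages_trim() is dropped because a plain munmap()-based trim of a valid
subrange does not fail here:

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Slow path: over-allocate by (alignment - pagesize), then unmap the
 * unaligned head and the unused tail, like pages_trim() above. */
static void *
aligned_map_slow(size_t size, size_t alignment)
{
	size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);
	size_t alloc_size = size + alignment - pagesize;

	if (alloc_size < size)	/* beware size_t wrap-around */
		return (NULL);
	char *pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (pages == MAP_FAILED)
		return (NULL);

	/* Round the start up to the next alignment boundary. */
	uintptr_t base = ((uintptr_t)pages + alignment - 1) & ~(alignment - 1);
	size_t leadsize = base - (uintptr_t)pages;
	size_t trailsize = alloc_size - leadsize - size;

	if (leadsize != 0)
		munmap(pages, leadsize);
	if (trailsize != 0)
		munmap((char *)base + size, trailsize);
	return ((void *)base);
}

/* Fast path: map exactly size bytes and keep the result only if the kernel
 * happened to return an aligned address, like chunk_alloc_mmap() above. */
static void *
aligned_map(size_t size, size_t alignment)
{
	void *ret = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (ret == MAP_FAILED)
		return (NULL);
	if (((uintptr_t)ret & (alignment - 1)) != 0) {
		/* Unaligned: give the mapping back, take the slow path. */
		munmap(ret, size);
		return (aligned_map_slow(size, alignment));
	}
	return (ret);
}

int
main(void)
{
	size_t chunk = 1 << 20;	/* 1 MiB, illustrative chunk size */
	void *p = aligned_map(chunk, chunk);

	if (p == NULL)
		return (1);
	printf("chunk at %p, offset %#zx\n", p,
	    (size_t)((uintptr_t)p & (chunk - 1)));
	munmap(p, chunk);
	return (0);
}

The fast path costs a single system call whenever the kernel returns an
aligned address; the slow path guarantees alignment at the price of an
over-sized mapping plus up to two unmaps, which is exactly the trade-off
described in the comment retained in chunk_alloc_mmap().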