Bug 1411786 - Don't call chunk_recycle for base allocations. r?njn
Instead of calling it with a boolean indicating whether the call is for
base allocations, and returning immediately if it is, avoid the call
altogether.
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -1925,34 +1925,24 @@ pages_purge(void *addr, size_t length, b
# undef JEMALLOC_MADV_ZEROS
# endif
#endif
}
static void *
chunk_recycle(RedBlackTree<extent_node_t, ExtentTreeSzTrait>* chunks_szad,
RedBlackTree<extent_node_t, ExtentTreeTrait>* chunks_ad,
- size_t size, size_t alignment, bool base, bool *zeroed)
+ size_t size, size_t alignment, bool *zeroed)
{
void *ret;
extent_node_t *node;
extent_node_t key;
size_t alloc_size, leadsize, trailsize;
ChunkType chunk_type;
- if (base) {
- /*
- * This function may need to call base_node_{,de}alloc(), but
- * the current chunk allocation request is on behalf of the
- * base allocator. Avoid deadlock (and if that weren't an
- * issue, potential for infinite recursion) by returning nullptr.
- */
- return nullptr;
- }
-
alloc_size = size + alignment - chunksize;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return nullptr;
key.addr = nullptr;
key.size = alloc_size;
chunks_mtx.Lock();
node = chunks_szad->SearchOrNext(&key);
@@ -2045,19 +2035,21 @@ chunk_alloc(size_t size, size_t alignmen
{
void *ret;
MOZ_ASSERT(size != 0);
MOZ_ASSERT((size & chunksize_mask) == 0);
MOZ_ASSERT(alignment != 0);
MOZ_ASSERT((alignment & chunksize_mask) == 0);
- if (CAN_RECYCLE(size)) {
+ // Base allocations can't be fulfilled by recycling because of
+ // possible deadlock or infinite recursion.
+ if (CAN_RECYCLE(size) && !base) {
ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap,
- size, alignment, base, zeroed);
+ size, alignment, zeroed);
if (ret)
goto RETURN;
}
ret = chunk_alloc_mmap(size, alignment);
if (zeroed)
*zeroed = true;
if (ret) {
goto RETURN;