--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -1085,18 +1085,18 @@ static AddressRadixTree<(SIZEOF_PTR << 3
static Mutex chunks_mtx;
/*
* Trees of chunks that were previously allocated (trees differ only in node
* ordering). These are used when allocating chunks, in an attempt to re-use
* address space. Depending on function, different tree orderings are needed,
* which is why there are two trees with the same contents.
*/
-static RedBlackTree<extent_node_t, ExtentTreeSzTrait> chunks_szad_mmap;
-static RedBlackTree<extent_node_t, ExtentTreeTrait> chunks_ad_mmap;
+static RedBlackTree<extent_node_t, ExtentTreeSzTrait> gChunksBySize;
+static RedBlackTree<extent_node_t, ExtentTreeTrait> gChunksByAddress;
/* Protects huge allocation-related data structures. */
static Mutex huge_mtx;
/* Tree of chunks that are stand-alone huge allocations. */
static RedBlackTree<extent_node_t, ExtentTreeTrait> huge;
/* Huge allocation statistics. */
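
For illustration, the two trees hold the same extent nodes under different
orderings: gChunksBySize sorts on (size, address), so chunk_recycle() can do a
best-fit search with SearchOrNext(), while gChunksByAddress sorts on address
alone, so chunk_record() can find memory-adjacent runs to coalesce. A minimal
sketch of the two orderings, with std::set standing in for the intrusive
RedBlackTree and its ExtentTreeSzTrait/ExtentTreeTrait comparators (BySize and
ByAddress are invented names):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <set>

    struct Extent { uintptr_t addr; size_t size; };

    // gChunksBySize ordering: size first, then address, so lower_bound()
    // lands on the lowest-addressed chunk of the smallest sufficient size.
    struct BySize {
      bool operator()(const Extent& a, const Extent& b) const {
        return a.size != b.size ? a.size < b.size : a.addr < b.addr;
      }
    };

    // gChunksByAddress ordering: address only, so tree neighbors are memory
    // neighbors, which is what coalescing needs.
    struct ByAddress {
      bool operator()(const Extent& a, const Extent& b) const {
        return a.addr < b.addr;
      }
    };

    int main() {
      std::set<Extent, BySize> bySize{{0x400000, 2}, {0x100000, 4}, {0x800000, 4}};
      std::set<Extent, ByAddress> byAddress(bySize.begin(), bySize.end());
      // SearchOrNext()-style best fit: smallest chunk with size >= 3.
      auto it = bySize.lower_bound(Extent{0, 3});
      std::printf("best fit: addr=%#llx size=%zu\n",
                  (unsigned long long)it->addr, it->size);
    }
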
@@ -1923,55 +1923,53 @@ pages_purge(void *addr, size_t length, b
return JEMALLOC_MADV_ZEROS && err == 0;
# undef JEMALLOC_MADV_PURGE
# undef JEMALLOC_MADV_ZEROS
# endif
#endif
}
static void *
-chunk_recycle(RedBlackTree<extent_node_t, ExtentTreeSzTrait>* chunks_szad,
- RedBlackTree<extent_node_t, ExtentTreeTrait>* chunks_ad,
- size_t size, size_t alignment, bool *zeroed)
+chunk_recycle(size_t size, size_t alignment, bool *zeroed)
{
void *ret;
extent_node_t *node;
extent_node_t key;
size_t alloc_size, leadsize, trailsize;
ChunkType chunk_type;
alloc_size = size + alignment - chunksize;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return nullptr;
key.addr = nullptr;
key.size = alloc_size;
chunks_mtx.Lock();
- node = chunks_szad->SearchOrNext(&key);
+ node = gChunksBySize.SearchOrNext(&key);
if (!node) {
chunks_mtx.Unlock();
return nullptr;
}
leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
(uintptr_t)node->addr;
MOZ_ASSERT(node->size >= leadsize + size);
trailsize = node->size - leadsize - size;
ret = (void *)((uintptr_t)node->addr + leadsize);
chunk_type = node->chunk_type;
if (zeroed) {
*zeroed = (chunk_type == ZEROED_CHUNK);
}
/* Remove node from the tree. */
- chunks_szad->Remove(node);
- chunks_ad->Remove(node);
+ gChunksBySize.Remove(node);
+ gChunksByAddress.Remove(node);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
node->size = leadsize;
- chunks_szad->Insert(node);
- chunks_ad->Insert(node);
+ gChunksBySize.Insert(node);
+ gChunksByAddress.Insert(node);
node = nullptr;
}
if (trailsize != 0) {
/* Insert the trailing space as a smaller chunk. */
if (!node) {
/*
* An additional node is required, but
* base_node_alloc() can cause a new base chunk to be
@@ -1985,18 +1983,18 @@ chunk_recycle(RedBlackTree<extent_node_t
chunk_dealloc(ret, size, chunk_type);
return nullptr;
}
chunks_mtx.Lock();
}
node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize;
node->chunk_type = chunk_type;
- chunks_szad->Insert(node);
- chunks_ad->Insert(node);
+ gChunksBySize.Insert(node);
+ gChunksByAddress.Insert(node);
node = nullptr;
}
recycled_size -= size;
chunks_mtx.Unlock();
if (node)
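
The over-ask above is the whole trick of recycling with alignment: searching
gChunksBySize for size + alignment - chunksize guarantees that any chunk-aligned
hit contains an alignment-aligned region of size bytes, and leadsize/trailsize
are whatever gets shaved off either end and reinserted. A hypothetical
standalone rendition of that arithmetic, with invented constants and
ALIGNMENT_CEILING written out as the usual power-of-two round-up:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Power-of-two round-up, as ALIGNMENT_CEILING computes it.
    constexpr uintptr_t AlignmentCeiling(uintptr_t p, uintptr_t align) {
      return (p + align - 1) & ~(align - 1);
    }

    int main() {
      const size_t chunksize = 1 << 20;        // invented: 1 MiB chunks
      const size_t size = 2 * chunksize;       // request: 2 MiB
      const size_t alignment = 4 * chunksize;  // wanted alignment: 4 MiB
      // Over-ask so any candidate can fit an aligned `size` region.
      const size_t alloc_size = size + alignment - chunksize;

      const uintptr_t addr = 5 * chunksize;    // invented candidate address
      const size_t leadsize = AlignmentCeiling(addr, alignment) - addr;
      assert(alloc_size >= leadsize + size);
      const size_t trailsize = alloc_size - leadsize - size;
      // The lead and trail go back into both trees as smaller chunks.
      std::printf("lead=%zu trail=%zu ret=%#llx\n", leadsize, trailsize,
                  (unsigned long long)(addr + leadsize));
    }
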
@@ -2038,18 +2036,17 @@ chunk_alloc(size_t size, size_t alignmen
MOZ_ASSERT(size != 0);
MOZ_ASSERT((size & chunksize_mask) == 0);
MOZ_ASSERT(alignment != 0);
MOZ_ASSERT((alignment & chunksize_mask) == 0);
// Base allocations can't be fulfilled by recycling because of
// possible deadlock or infinite recursion.
if (CAN_RECYCLE(size) && !base) {
- ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap,
- size, alignment, zeroed);
+ ret = chunk_recycle(size, alignment, zeroed);
if (ret)
goto RETURN;
}
ret = chunk_alloc_mmap(size, alignment);
if (zeroed)
*zeroed = true;
if (ret) {
goto RETURN;
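
With the trees global, the recycle-then-map order is all that remains of
chunk_alloc()'s strategy: try recycled address space first (never for base
allocations, per the comment above), then fall back to a fresh mapping, which
the OS hands out zero-filled. A sketch of that flow; the stubs and their
signatures are assumptions, not the real helpers:

    #include <cstddef>

    void* ChunkRecycle(size_t, size_t, bool*) { return nullptr; }  // stub
    void* ChunkAllocMmap(size_t, size_t) { return nullptr; }       // stub

    void* ChunkAlloc(size_t size, size_t alignment, bool* zeroed, bool base) {
      // Base allocations skip recycling to avoid deadlock/recursion.
      if (!base) {
        if (void* ret = ChunkRecycle(size, alignment, zeroed)) {
          return ret;  // *zeroed already reflects the chunk's history
        }
      }
      void* ret = ChunkAllocMmap(size, alignment);
      if (zeroed) {
        *zeroed = true;  // fresh mappings come zeroed
      }
      return ret;
    }
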
@@ -2084,19 +2081,17 @@ chunk_ensure_zero(void* aPtr, size_t aSi
for (i = 0; i < aSize / sizeof(size_t); i++) {
MOZ_ASSERT(p[i] == 0);
}
}
#endif
}
static void
-chunk_record(RedBlackTree<extent_node_t, ExtentTreeSzTrait>* chunks_szad,
- RedBlackTree<extent_node_t, ExtentTreeTrait>* chunks_ad,
- void *chunk, size_t size, ChunkType chunk_type)
+chunk_record(void *chunk, size_t size, ChunkType chunk_type)
{
extent_node_t *xnode, *node, *prev, *xprev, key;
if (chunk_type != ZEROED_CHUNK) {
if (pages_purge(chunk, size, chunk_type == HUGE_CHUNK)) {
chunk_type = ZEROED_CHUNK;
}
}
@@ -2108,70 +2103,70 @@ chunk_record(RedBlackTree<extent_node_t,
* held.
*/
xnode = base_node_alloc();
/* Use xprev to implement conditional deferred deallocation of prev. */
xprev = nullptr;
chunks_mtx.Lock();
key.addr = (void *)((uintptr_t)chunk + size);
- node = chunks_ad->SearchOrNext(&key);
+ node = gChunksByAddress.SearchOrNext(&key);
/* Try to coalesce forward. */
if (node && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range. This does
- * not change the position within chunks_ad, so only
- * remove/insert from/into chunks_szad.
+ * not change the position within gChunksByAddress, so only
+ * remove/insert from/into gChunksBySize.
*/
- chunks_szad->Remove(node);
+ gChunksBySize.Remove(node);
node->addr = chunk;
node->size += size;
if (node->chunk_type != chunk_type) {
node->chunk_type = RECYCLED_CHUNK;
}
- chunks_szad->Insert(node);
+ gChunksBySize.Insert(node);
} else {
/* Coalescing forward failed, so insert a new node. */
if (!xnode) {
/*
* base_node_alloc() failed, which is an exceedingly
* unlikely failure. Leak chunk; its pages have
* already been purged, so this is only a virtual
* memory leak.
*/
goto label_return;
}
node = xnode;
xnode = nullptr; /* Prevent deallocation below. */
node->addr = chunk;
node->size = size;
node->chunk_type = chunk_type;
- chunks_ad->Insert(node);
- chunks_szad->Insert(node);
+ gChunksByAddress.Insert(node);
+ gChunksBySize.Insert(node);
}
/* Try to coalesce backward. */
- prev = chunks_ad->Prev(node);
+ prev = gChunksByAddress.Prev(node);
if (prev && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
* Coalesce chunk with the previous address range. This does
- * not change the position within chunks_ad, so only
- * remove/insert node from/into chunks_szad.
+ * not change the position within gChunksByAddress, so only
+ * remove/insert node from/into gChunksBySize.
*/
- chunks_szad->Remove(prev);
- chunks_ad->Remove(prev);
-
- chunks_szad->Remove(node);
+ gChunksBySize.Remove(prev);
+ gChunksByAddress.Remove(prev);
+
+ gChunksBySize.Remove(node);
node->addr = prev->addr;
node->size += prev->size;
if (node->chunk_type != prev->chunk_type) {
node->chunk_type = RECYCLED_CHUNK;
}
- chunks_szad->Insert(node);
+ gChunksBySize.Insert(node);
xprev = prev;
}
recycled_size += size;
label_return:
chunks_mtx.Unlock();
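
One subtlety in the forward-coalescing branch: node keeps its slot in
gChunksByAddress even though node->addr moves down to chunk, because the freed
range [chunk, chunk + size) cannot contain another free node, so only
gChunksBySize needs the remove/insert pair. The coalescing idea itself, sketched
against a plain std::map keyed by address (the size tree, locking, and
chunk_type bookkeeping are omitted; RecordFree is an invented name):

    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <map>

    // addr -> run length; a stand-in for gChunksByAddress only.
    std::map<uintptr_t, size_t> freeRuns;

    void RecordFree(uintptr_t chunk, size_t size) {
      // Forward: a run starting exactly at chunk + size merges into us.
      auto next = freeRuns.find(chunk + size);
      if (next != freeRuns.end()) {
        size += next->second;
        freeRuns.erase(next);
      }
      // Backward: a run ending exactly at chunk absorbs us.
      auto it = freeRuns.lower_bound(chunk);
      if (it != freeRuns.begin()) {
        auto prev = std::prev(it);
        if (prev->first + prev->second == chunk) {
          chunk = prev->first;
          size += prev->second;
          freeRuns.erase(prev);
        }
      }
      freeRuns[chunk] = size;
    }
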
@@ -2204,17 +2199,17 @@ chunk_dealloc(void *chunk, size_t size,
size_t to_recycle;
if (size > recycle_remaining) {
to_recycle = recycle_remaining;
// Drop pages that would overflow the recycle limit
pages_trim(chunk, size, 0, to_recycle);
} else {
to_recycle = size;
}
- chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, to_recycle, type);
+ chunk_record(chunk, to_recycle, type);
return;
}
}
pages_unmap(chunk, size);
}
#undef CAN_RECYCLE
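
For reference, the clamp above in miniature: recycled_size (the same counter
chunk_recycle() and chunk_record() adjust) may never exceed the recycle limit,
so an oversized chunk is trimmed and only the remainder recorded. A toy model
with an invented limit value:

    #include <cstddef>

    const size_t gRecycleLimit = 128 * 1024 * 1024;  // invented cap
    size_t recycled_size = 0;  // bytes currently in the recycle trees

    // How much of a freed chunk of `size` bytes may be recorded for reuse;
    // the caller unmaps the rest (pages_trim() in the real code).
    size_t ToRecycle(size_t size) {
      const size_t remaining = gRecycleLimit - recycled_size;
      return size > remaining ? remaining : size;
    }
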
@@ -4365,18 +4360,18 @@ MALLOC_OUT:
/* Various sanity checks that regard configuration. */
MOZ_ASSERT(quantum >= sizeof(void *));
MOZ_ASSERT(quantum <= pagesize);
MOZ_ASSERT(chunksize >= pagesize);
MOZ_ASSERT(quantum * 4 <= chunksize);
/* Initialize chunks data. */
chunks_mtx.Init();
- chunks_szad_mmap.Init();
- chunks_ad_mmap.Init();
+ gChunksBySize.Init();
+ gChunksByAddress.Init();
/* Initialize huge allocation data. */
huge_mtx.Init();
huge.Init();
huge_allocated = 0;
huge_mapped = 0;
/* Initialize base allocation data structures. */