--- a/memory/mozjemalloc/mozjemalloc.cpp
+++ b/memory/mozjemalloc/mozjemalloc.cpp
@@ -501,33 +501,41 @@ struct arena_stats_s {
uint64_t ndalloc_large;
};
/******************************************************************************/
/*
* Extent data structures.
*/
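+/*
+ * Chunk type bookkeeping for recycling: purging a chunk may upgrade it to
+ * ZEROED_CHUNK (see chunk_record), while coalescing chunks of different
+ * types degrades the result to RECYCLED_CHUNK, which is never assumed to
+ * contain zeroes.
+ */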
+enum ChunkType {
+ UNKNOWN_CHUNK, // chunk whose type has not been determined
+ ZEROED_CHUNK, // chunk only contains zeroes
+ ARENA_CHUNK, // used to back arena runs created by arena_run_alloc
+ HUGE_CHUNK, // used to back huge allocations (e.g. huge_malloc)
+ RECYCLED_CHUNK, // chunk has been stored for future use by chunk_recycle
+};
+
/* Tree of extents. */
typedef struct extent_node_s extent_node_t;
struct extent_node_s {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) link_szad;
/* Linkage for the address-ordered tree. */
rb_node(extent_node_t) link_ad;
/* Pointer to the extent that this tree node is responsible for. */
void *addr;
/* Total region size. */
size_t size;
- /* True if zero-filled; used by chunk recycling code. */
- bool zeroed;
+ /* The type of this chunk; used by chunk recycling code. */
+ ChunkType chunk_type;
};
typedef rb_tree(extent_node_t) extent_tree_t;
/******************************************************************************/
/*
* Radix tree data structures.
*/
@@ -790,23 +798,16 @@ struct arena_s {
* --------+------+
* 35 | 1024 |
* 36 | 2048 |
* --------+------+
*/
arena_bin_t bins[1]; /* Dynamically sized. */
};
-enum ChunkType {
- UNKNOWN_CHUNK,
- ARENA_CHUNK, // used to back arena runs created by arena_run_alloc
- HUGE_CHUNK, // used to back huge allocations (e.g. huge_malloc)
- RECYCLED_CHUNK, // chunk has been stored for future use by chunk_recycle
-};
-
/******************************************************************************/
/*
* Data.
*/
/*
* When MALLOC_STATIC_SIZES is defined most of the parameters
* controlling the malloc behavior are defined as compile-time constants
@@ -1029,17 +1030,17 @@ static size_t opt_chunk_2pow = CHUNK_2PO
#endif
/******************************************************************************/
/*
* Begin forward declarations.
*/
static void *chunk_alloc(size_t size, size_t alignment, bool base, bool zero);
-static void chunk_dealloc(void *chunk, size_t size, enum ChunkType type);
+static void chunk_dealloc(void *chunk, size_t size, ChunkType type);
static arena_t *arenas_extend();
static void *huge_malloc(size_t size, bool zero);
static void *huge_palloc(size_t size, size_t alignment, bool zero);
static void *huge_ralloc(void *ptr, size_t size, size_t oldsize);
static void huge_dalloc(void *ptr);
#ifdef MOZ_MEMORY_WINDOWS
extern "C"
#else
@@ -1976,17 +1977,17 @@ pages_purge(void *addr, size_t length, b
static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
size_t alignment, bool base, bool *zero)
{
void *ret;
extent_node_t *node;
extent_node_t key;
size_t alloc_size, leadsize, trailsize;
- bool zeroed;
+ ChunkType chunk_type;
if (base) {
/*
* This function may need to call base_node_{,de}alloc(), but
* the current chunk allocation request is on behalf of the
* base allocator. Avoid deadlock (and if that weren't an
* issue, potential for infinite recursion) by returning nullptr.
*/
@@ -2005,18 +2006,18 @@ chunk_recycle(extent_tree_t *chunks_szad
malloc_mutex_unlock(&chunks_mtx);
return nullptr;
}
leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
(uintptr_t)node->addr;
MOZ_ASSERT(node->size >= leadsize + size);
trailsize = node->size - leadsize - size;
ret = (void *)((uintptr_t)node->addr + leadsize);
- zeroed = node->zeroed;
- if (zeroed)
+ chunk_type = node->chunk_type;
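+ /* A chunk recorded as zeroed satisfies a zeroed request without the memset below. */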
+ if (chunk_type == ZEROED_CHUNK)
*zero = true;
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
node->size = leadsize;
extent_tree_szad_insert(chunks_szad, node);
@@ -2031,40 +2032,40 @@ chunk_recycle(extent_tree_t *chunks_szad
* base_node_alloc() can cause a new base chunk to be
* allocated. Drop chunks_mtx in order to avoid
* deadlock, and if node allocation fails, deallocate
* the result before returning an error.
*/
malloc_mutex_unlock(&chunks_mtx);
node = base_node_alloc();
if (!node) {
- chunk_dealloc(ret, size, RECYCLED_CHUNK);
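+ /* Preserve the recorded type; a ZEROED_CHUNK skips purging when re-recorded. */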
+ chunk_dealloc(ret, size, chunk_type);
return nullptr;
}
malloc_mutex_lock(&chunks_mtx);
}
node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize;
- node->zeroed = zeroed;
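+ /* The trailing space keeps the type recorded for the original chunk. */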
+ node->chunk_type = chunk_type;
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
node = nullptr;
}
recycled_size -= size;
malloc_mutex_unlock(&chunks_mtx);
if (node)
base_node_dealloc(node);
#ifdef MALLOC_DECOMMIT
pages_commit(ret, size);
#endif
if (*zero) {
- if (zeroed == false)
+ if (chunk_type != ZEROED_CHUNK)
memset(ret, 0, size);
#ifdef DEBUG
else {
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
for (i = 0; i < size / sizeof(size_t); i++)
MOZ_ASSERT(p[i] == 0);
@@ -2119,22 +2120,25 @@ RETURN:
}
MOZ_ASSERT(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
}
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
- size_t size, enum ChunkType type)
+ size_t size, ChunkType chunk_type)
{
- bool zeroed;
extent_node_t *xnode, *node, *prev, *xprev, key;
- zeroed = pages_purge(chunk, size, type == HUGE_CHUNK);
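+ /* If purging guarantees zero-filled pages, upgrade the recorded type. */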
+ if (chunk_type != ZEROED_CHUNK) {
+ if (pages_purge(chunk, size, chunk_type == HUGE_CHUNK)) {
+ chunk_type = ZEROED_CHUNK;
+ }
+ }
/*
* Allocate a node before acquiring chunks_mtx even though it might not
* be needed, because base_node_alloc() may cause a new base chunk to
* be allocated, which could cause deadlock if chunks_mtx were already
* held.
*/
xnode = base_node_alloc();
@@ -2149,34 +2153,36 @@ chunk_record(extent_tree_t *chunks_szad,
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, node);
node->addr = chunk;
node->size += size;
- node->zeroed = node->zeroed && zeroed;
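+ /* Chunks of differing types cannot share either label once merged;
+ * conservatively degrade to RECYCLED_CHUNK. */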
+ if (node->chunk_type != chunk_type) {
+ node->chunk_type = RECYCLED_CHUNK;
+ }
extent_tree_szad_insert(chunks_szad, node);
} else {
/* Coalescing forward failed, so insert a new node. */
if (!xnode) {
/*
* base_node_alloc() failed, which is an exceedingly
* unlikely failure. Leak chunk; its pages have
* already been purged, so this is only a virtual
* memory leak.
*/
goto label_return;
}
node = xnode;
xnode = nullptr; /* Prevent deallocation below. */
node->addr = chunk;
node->size = size;
- node->zeroed = zeroed;
+ node->chunk_type = chunk_type;
extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
}
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(chunks_ad, node);
if (prev && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
@@ -2186,17 +2192,19 @@ chunk_record(extent_tree_t *chunks_szad,
* remove/insert node from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(chunks_ad, prev);
extent_tree_szad_remove(chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
- node->zeroed = (node->zeroed && prev->zeroed);
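+ /* As with forward coalescing, differing types degrade to RECYCLED_CHUNK. */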
+ if (node->chunk_type != prev->chunk_type) {
+ node->chunk_type = RECYCLED_CHUNK;
+ }
extent_tree_szad_insert(chunks_szad, node);
xprev = prev;
}
recycled_size += size;
label_return:
@@ -2219,17 +2227,17 @@ chunk_dalloc_mmap(void *chunk, size_t si
pages_unmap(chunk, size);
return false;
}
#undef CAN_RECYCLE
static void
-chunk_dealloc(void *chunk, size_t size, enum ChunkType type)
+chunk_dealloc(void *chunk, size_t size, ChunkType type)
{
MOZ_ASSERT(chunk);
MOZ_ASSERT(CHUNK_ADDR2BASE(chunk) == chunk);
MOZ_ASSERT(size != 0);
MOZ_ASSERT((size & chunksize_mask) == 0);
malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, nullptr);