--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -457,17 +457,17 @@ struct arena_stats_t {
/******************************************************************************/
/*
* Extent data structures.
*/
enum ChunkType {
UNKNOWN_CHUNK,
ZEROED_CHUNK, // chunk only contains zeroes
- ARENA_CHUNK, // used to back arena runs created by arena_run_alloc
+ ARENA_CHUNK, // used to back arena runs created by arena_t::AllocRun
HUGE_CHUNK, // used to back huge allocations (e.g. huge_malloc)
RECYCLED_CHUNK, // chunk has been stored for future use by chunk_recycle
};
/* Tree of extents. */
struct extent_node_t {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) link_szad;
@@ -703,37 +703,36 @@ struct arena_t {
/* All operations on this arena require that lock be locked. */
malloc_spinlock_t mLock;
arena_stats_t mStats;
/* Tree of dirty-page-containing chunks this arena manages. */
arena_chunk_tree_t mChunksDirty;
+private:
#ifdef MALLOC_DOUBLE_PURGE
-private:
/* Head of a linked list of MADV_FREE'd-page-containing chunks this
* arena manages. */
mozilla::DoublyLinkedList<arena_chunk_t> mChunksMAdvised;
-
-public:
#endif
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
* oscillates right on the cusp of needing a new chunk, cache the most
* recently freed chunk. The spare is left in the arena's chunk trees
* until it is deleted.
*
* There is one spare chunk per arena, rather than one spare total, in
* order to avoid interactions between multiple threads that could make
* a single spare inadequate.
*/
arena_chunk_t* mSpare;
+public:
/*
* Current count of pages within unused runs that are potentially
* dirty, and for which madvise(... MADV_FREE) has not been called. By
* tracking this, we can institute a limit on how much dirty unused
* memory is mapped for each arena.
*/
size_t mNumDirty;
/*
@@ -769,20 +768,28 @@
* 35 | 1024 |
* 36 | 2048 |
* --------+------+
*/
arena_bin_t mBins[1]; /* Dynamically sized. */
bool Init();
+private:
void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
+public:
void DeallocChunk(arena_chunk_t* aChunk);
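+  /* Allocate an aSize-byte run for aBin, or, when aBin is nullptr, for a
+   * large allocation. aLarge and aZero are forwarded to arena_run_split,
+   * which updates the page map. (aBin itself is currently unused in the
+   * body.) */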
+ arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);
+
void Purge(bool aAll);
void HardPurge();
};
/******************************************************************************/
/*
* Data.
@@ -2744,73 +2751,77 @@ arena_t::DeallocChunk(arena_chunk_t* aCh
* Dirty page flushing only uses the tree of dirty chunks, so leaving this
* chunk in the chunks_* trees is sufficient for that purpose.
*/
arena_avail_tree_remove(&mRunsAvail, &aChunk->map[arena_chunk_header_npages]);
mSpare = aChunk;
}
-static arena_run_t *
-arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
- bool zero)
+arena_run_t*
+arena_t::AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero)
{
- arena_run_t *run;
- arena_chunk_map_t *mapelm, key;
-
- MOZ_ASSERT(size <= arena_maxclass);
- MOZ_ASSERT((size & pagesize_mask) == 0);
-
- /* Search the arena's chunks for the lowest best fit. */
- key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->mRunsAvail, &key);
- if (mapelm) {
- arena_chunk_t *chunk =
- (arena_chunk_t*)CHUNK_ADDR2BASE(mapelm);
- size_t pageind = ((uintptr_t)mapelm -
- (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t);
-
- run = (arena_run_t *)((uintptr_t)chunk + (pageind
- << pagesize_2pow));
- arena_run_split(arena, run, size, large, zero);
- return (run);
- }
-
- if (arena->mSpare) {
- /* Use the spare. */
- arena_chunk_t *chunk = arena->mSpare;
- arena->mSpare = nullptr;
- run = (arena_run_t *)((uintptr_t)chunk +
- (arena_chunk_header_npages << pagesize_2pow));
- /* Insert the run into the tree of available runs. */
- arena_avail_tree_insert(&arena->mRunsAvail,
- &chunk->map[arena_chunk_header_npages]);
- arena_run_split(arena, run, size, large, zero);
- return (run);
- }
-
- /*
- * No usable runs. Create a new chunk from which to allocate
- * the run.
- */
- {
- bool zeroed;
- arena_chunk_t *chunk = (arena_chunk_t *)
- chunk_alloc(chunksize, chunksize, false, &zeroed);
- if (!chunk)
- return nullptr;
-
- arena->InitChunk(chunk, zeroed);
- run = (arena_run_t *)((uintptr_t)chunk +
- (arena_chunk_header_npages << pagesize_2pow));
- }
- /* Update page map. */
- arena_run_split(arena, run, size, large, zero);
- return (run);
+ arena_run_t* run;
+ arena_chunk_map_t* mapelm;
+ arena_chunk_map_t key;
+
+ MOZ_ASSERT(aSize <= arena_maxclass);
+ MOZ_ASSERT((aSize & pagesize_mask) == 0);
+
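+  /*
+   * Three ways to get a run, tried in order: the lowest best fit among
+   * the already-available runs, the cached spare chunk, and finally a
+   * newly allocated chunk.
+   */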
+ /* Search the arena's chunks for the lowest best fit. */
+ key.bits = aSize | CHUNK_MAP_KEY;
+ mapelm = arena_avail_tree_nsearch(&mRunsAvail, &key);
+ if (mapelm) {
+ arena_chunk_t* chunk =
+ (arena_chunk_t*)CHUNK_ADDR2BASE(mapelm);
+ size_t pageind = (uintptr_t(mapelm) - uintptr_t(chunk->map)) /
+ sizeof(arena_chunk_map_t);
+
+ run = (arena_run_t*)(uintptr_t(chunk) + (pageind << pagesize_2pow));
+ arena_run_split(this, run, aSize, aLarge, aZero);
+ return run;
+ }
+
+ if (mSpare) {
+ /* Use the spare. */
+ arena_chunk_t* chunk = mSpare;
+ mSpare = nullptr;
+ run = (arena_run_t*)(uintptr_t(chunk) + (arena_chunk_header_npages << pagesize_2pow));
+    /* Insert the run back into the tree of available runs; DeallocChunk
+     * removed it from the tree when this chunk became the spare. */
+ arena_avail_tree_insert(&mRunsAvail, &chunk->map[arena_chunk_header_npages]);
+ arena_run_split(this, run, aSize, aLarge, aZero);
+ return run;
+ }
+
+ /*
+ * No usable runs. Create a new chunk from which to allocate
+ * the run.
+ */
+ {
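+    /* chunk_alloc reports through |zeroed| whether the chunk it returns
+     * is already zero-filled; InitChunk takes that into account. */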
+ bool zeroed;
+ arena_chunk_t* chunk = (arena_chunk_t*)
+ chunk_alloc(chunksize, chunksize, false, &zeroed);
+ if (!chunk) {
+ return nullptr;
+ }
+
+ InitChunk(chunk, zeroed);
+ run = (arena_run_t*)(uintptr_t(chunk) + (arena_chunk_header_npages << pagesize_2pow));
+ }
+ /* Update page map. */
+ arena_run_split(this, run, aSize, aLarge, aZero);
+ return run;
}
void
arena_t::Purge(bool aAll)
{
arena_chunk_t* chunk;
size_t i, npages;
/* If all is set purge all dirty pages. */
@@ -3063,21 +3074,21 @@ arena_bin_nonfull_run_get(arena_t *arena
/* run is guaranteed to have available space. */
arena_run_tree_remove(&bin->runs, mapelm);
run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
return (run);
}
/* No existing runs have any space available. */
/* Allocate a new run. */
- run = arena_run_alloc(arena, bin, bin->run_size, false, false);
+ run = arena->AllocRun(bin, bin->run_size, false, false);
if (!run)
return nullptr;
/*
- * Don't initialize if a race in arena_run_alloc() allowed an existing
+ * Don't initialize if a race in arena_t::AllocRun() allowed an existing
* run to become usable.
*/
if (run == bin->runcur)
return (run);
/* Initialize run internals. */
run->bin = bin;
@@ -3275,17 +3286,17 @@ arena_malloc_small(arena_t *arena, size_
static void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
void *ret;
/* Large allocation. */
size = PAGE_CEILING(size);
malloc_spin_lock(&arena->mLock);
- ret = (void *)arena_run_alloc(arena, nullptr, size, true, zero);
+ ret = arena->AllocRun(nullptr, size, true, zero);
if (!ret) {
malloc_spin_unlock(&arena->mLock);
return nullptr;
}
arena->mStats.allocated_large += size;
malloc_spin_unlock(&arena->mLock);
if (zero == false) {
@@ -3342,17 +3353,17 @@ arena_palloc(arena_t *arena, size_t alig
void *ret;
size_t offset;
arena_chunk_t *chunk;
MOZ_ASSERT((size & pagesize_mask) == 0);
MOZ_ASSERT((alignment & pagesize_mask) == 0);
malloc_spin_lock(&arena->mLock);
- ret = (void *)arena_run_alloc(arena, nullptr, alloc_size, true, false);
+ ret = arena->AllocRun(nullptr, alloc_size, true, false);
if (!ret) {
malloc_spin_unlock(&arena->mLock);
return nullptr;
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
offset = (uintptr_t)ret & (alignment - 1);