Bug 1401099 - Move arena_chunk_dealloc to a method of arena_t. r?njn
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -704,19 +704,22 @@ struct arena_t {
malloc_spinlock_t mLock;
arena_stats_t mStats;
/* Tree of dirty-page-containing chunks this arena manages. */
arena_chunk_tree_t mChunksDirty;
#ifdef MALLOC_DOUBLE_PURGE
+private:
/* Head of a linked list of MADV_FREE'd-page-containing chunks this
* arena manages. */
mozilla::DoublyLinkedList<arena_chunk_t> mChunksMAdvised;
+
+public:
#endif
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
* oscillates right on the cusp of needing a new chunk, cache the most
* recently freed chunk. The spare is left in the arena's chunk trees
* until it is deleted.
*
@@ -768,16 +771,18 @@ struct arena_t {
* --------+------+
*/
arena_bin_t mBins[1]; /* Dynamically sized. */
bool Init();
void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
+ void DeallocChunk(arena_chunk_t* aChunk);
+
void Purge(bool aAll);
void HardPurge();
};
/******************************************************************************/
/*
* Data.
@@ -2708,48 +2713,45 @@ arena_t::InitChunk(arena_chunk_t* aChunk
arena_avail_tree_insert(&mRunsAvail,
&aChunk->map[arena_chunk_header_npages]);
#ifdef MALLOC_DOUBLE_PURGE
new (&aChunk->chunks_madvised_elem) mozilla::DoublyLinkedListElement<arena_chunk_t>();
#endif
}
-static void
-arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
+void
+arena_t::DeallocChunk(arena_chunk_t* aChunk)
{
-
- if (arena->mSpare) {
- if (arena->mSpare->ndirty > 0) {
- arena_chunk_tree_dirty_remove(
- &chunk->arena->mChunksDirty, arena->mSpare);
- arena->mNumDirty -= arena->mSpare->ndirty;
- arena->mStats.committed -= arena->mSpare->ndirty;
- }
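+  /*
+   * The arena caches the most recently freed chunk as mSpare.  If a spare is
+   * already cached, release it for real before caching aChunk in its place.
+   */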
+ if (mSpare) {
+ if (mSpare->ndirty > 0) {
+      arena_chunk_tree_dirty_remove(&mChunksDirty, mSpare);
+ mNumDirty -= mSpare->ndirty;
+ mStats.committed -= mSpare->ndirty;
+ }
#ifdef MALLOC_DOUBLE_PURGE
- if (arena->mChunksMAdvised.ElementProbablyInList(arena->mSpare)) {
- arena->mChunksMAdvised.remove(arena->mSpare);
- }
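+    /*
+     * The old spare may still be linked into the list of MADV_FREE'd chunks;
+     * unlink it before freeing its memory.
+     */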
+ if (mChunksMAdvised.ElementProbablyInList(mSpare)) {
+ mChunksMAdvised.remove(mSpare);
+ }
#endif
- chunk_dealloc((void *)arena->mSpare, chunksize, ARENA_CHUNK);
- arena->mStats.mapped -= chunksize;
- arena->mStats.committed -= arena_chunk_header_npages;
- }
-
- /*
- * Remove run from the tree of available runs, so that the arena does not use it.
- * Dirty page flushing only uses the tree of dirty chunks, so leaving this
- * chunk in the chunks_* trees is sufficient for that purpose.
- */
- arena_avail_tree_remove(&arena->mRunsAvail,
- &chunk->map[arena_chunk_header_npages]);
-
- arena->mSpare = chunk;
+ chunk_dealloc((void*)mSpare, chunksize, ARENA_CHUNK);
+ mStats.mapped -= chunksize;
+ mStats.committed -= arena_chunk_header_npages;
+ }
+
+  /*
+   * Remove the run from the tree of available runs, so that the arena does
+   * not use it.  Dirty page flushing only uses the tree of dirty chunks, so
+   * leaving this chunk in the chunk trees is sufficient for that purpose.
+   */
+ arena_avail_tree_remove(&mRunsAvail, &aChunk->map[arena_chunk_header_npages]);
+
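+  /* Cache this chunk as the spare so the next chunk allocation can reuse it. */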
+ mSpare = aChunk;
}
static arena_run_t *
arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
bool zero)
{
arena_run_t *run;
arena_chunk_map_t *mapelm, key;
@@ -2990,18 +2992,19 @@ arena_run_dalloc(arena_t *arena, arena_r
(chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
}
/* Insert into tree of available runs, now that coalescing is complete. */
arena_avail_tree_insert(&arena->mRunsAvail, &chunk->map[run_ind]);
/* Deallocate chunk if it is now completely unused. */
if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
- CHUNK_MAP_ALLOCATED)) == arena_maxclass)
- arena_chunk_dealloc(arena, chunk);
+ CHUNK_MAP_ALLOCATED)) == arena_maxclass) {
+ arena->DeallocChunk(chunk);
+ }
/* Enforce mMaxDirty. */
if (arena->mNumDirty > arena->mMaxDirty) {
arena->Purge(false);
}
}
static void