Bug 1401099 - Move arena_run_dalloc to a method of arena_t. r?njn draft
author	Mike Hommey <mh+mozilla@glandium.org>
Fri, 15 Sep 2017 18:01:27 +0900
changeset 667407 499c66f296efbb04408b9cef572dfdea05413722
parent 667406 4339539414dd8e68577ab6289639c3b8fc4d202f
child 667408 6c1f51772e15a6366d9e292a81b2d4554469ea5d
push id	80694
push user	bmo:mh+mozilla@glandium.org
push date	Wed, 20 Sep 2017 02:07:21 +0000
reviewers	njn
bugs	1401099
milestone	57.0a1
Bug 1401099 - Move arena_run_dalloc to a method of arena_t. r?njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -771,21 +771,23 @@ public:
    */
   arena_bin_t mBins[1]; /* Dynamically sized. */
 
   bool Init();
 
 private:
   void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
 
+  void DeallocChunk(arena_chunk_t* aChunk);
+
 public:
-  void DeallocChunk(arena_chunk_t* aChunk);
-
   arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);
 
+  void DallocRun(arena_run_t* aRun, bool aDirty);
+
   void Purge(bool aAll);
 
   void HardPurge();
 };
 
 /******************************************************************************/
 /*
  * Data.
@@ -2891,167 +2893,164 @@ arena_t::Purge(bool aAll)
         mChunksMAdvised.remove(chunk);
       }
       mChunksMAdvised.pushFront(chunk);
     }
 #endif
   }
 }
 
-static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
+void
+arena_t::DallocRun(arena_run_t* aRun, bool aDirty)
 {
-	arena_chunk_t *chunk;
-	size_t size, run_ind, run_pages;
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
-	    >> pagesize_2pow);
-	MOZ_DIAGNOSTIC_ASSERT(run_ind >= arena_chunk_header_npages);
-	MOZ_DIAGNOSTIC_ASSERT(run_ind < chunk_npages);
-	if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0)
-		size = chunk->map[run_ind].bits & ~pagesize_mask;
-	else
-		size = run->bin->run_size;
-	run_pages = (size >> pagesize_2pow);
-
-	/* Mark pages as unallocated in the chunk map. */
-	if (dirty) {
-		size_t i;
-
-		for (i = 0; i < run_pages; i++) {
-			MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
-			    == 0);
-			chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
-		}
-
-		if (chunk->ndirty == 0) {
-			arena_chunk_tree_dirty_insert(&arena->mChunksDirty,
-			    chunk);
-		}
-		chunk->ndirty += run_pages;
-		arena->mNumDirty += run_pages;
-	} else {
-		size_t i;
-
-		for (i = 0; i < run_pages; i++) {
-			chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE |
-			    CHUNK_MAP_ALLOCATED);
-		}
-	}
-	chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
-	    pagesize_mask);
-	chunk->map[run_ind+run_pages-1].bits = size |
-	    (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
-
-	/* Try to coalesce forward. */
-	if (run_ind + run_pages < chunk_npages &&
-	    (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
-		size_t nrun_size = chunk->map[run_ind+run_pages].bits &
-		    ~pagesize_mask;
-
-		/*
-		 * Remove successor from tree of available runs; the coalesced run is
-		 * inserted later.
-		 */
-		arena_avail_tree_remove(&arena->mRunsAvail,
-		    &chunk->map[run_ind+run_pages]);
-
-		size += nrun_size;
-		run_pages = size >> pagesize_2pow;
-
-		MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
-		    == nrun_size);
-		chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
-		    pagesize_mask);
-		chunk->map[run_ind+run_pages-1].bits = size |
-		    (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
-	}
-
-	/* Try to coalesce backward. */
-	if (run_ind > arena_chunk_header_npages && (chunk->map[run_ind-1].bits &
-	    CHUNK_MAP_ALLOCATED) == 0) {
-		size_t prun_size = chunk->map[run_ind-1].bits & ~pagesize_mask;
-
-		run_ind -= prun_size >> pagesize_2pow;
-
-		/*
-		 * Remove predecessor from tree of available runs; the coalesced run is
-		 * inserted later.
-		 */
-		arena_avail_tree_remove(&arena->mRunsAvail,
-		    &chunk->map[run_ind]);
-
-		size += prun_size;
-		run_pages = size >> pagesize_2pow;
-
-		MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
-		    prun_size);
-		chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
-		    pagesize_mask);
-		chunk->map[run_ind+run_pages-1].bits = size |
-		    (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
-	}
-
-	/* Insert into tree of available runs, now that coalescing is complete. */
-	arena_avail_tree_insert(&arena->mRunsAvail, &chunk->map[run_ind]);
-
-	/* Deallocate chunk if it is now completely unused. */
-	if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
-	    CHUNK_MAP_ALLOCATED)) == arena_maxclass) {
-		arena->DeallocChunk(chunk);
-	}
-
-	/* Enforce mMaxDirty. */
-	if (arena->mNumDirty > arena->mMaxDirty) {
-		arena->Purge(false);
-	}
+  arena_chunk_t* chunk;
+  size_t size, run_ind, run_pages;
+
+  chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(aRun);
+  run_ind = (size_t)((uintptr_t(aRun) - uintptr_t(chunk)) >> pagesize_2pow);
+  MOZ_DIAGNOSTIC_ASSERT(run_ind >= arena_chunk_header_npages);
+  MOZ_DIAGNOSTIC_ASSERT(run_ind < chunk_npages);
+  if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0)
+    size = chunk->map[run_ind].bits & ~pagesize_mask;
+  else
+    size = aRun->bin->run_size;
+  run_pages = (size >> pagesize_2pow);
+
+  /* Mark pages as unallocated in the chunk map. */
+  if (aDirty) {
+    size_t i;
+
+    for (i = 0; i < run_pages; i++) {
+      MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
+          == 0);
+      chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
+    }
+
+    if (chunk->ndirty == 0) {
+      arena_chunk_tree_dirty_insert(&mChunksDirty,
+          chunk);
+    }
+    chunk->ndirty += run_pages;
+    mNumDirty += run_pages;
+  } else {
+    size_t i;
+
+    for (i = 0; i < run_pages; i++) {
+      chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE |
+          CHUNK_MAP_ALLOCATED);
+    }
+  }
+  chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+      pagesize_mask);
+  chunk->map[run_ind+run_pages-1].bits = size |
+      (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+
+  /* Try to coalesce forward. */
+  if (run_ind + run_pages < chunk_npages &&
+      (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
+    size_t nrun_size = chunk->map[run_ind+run_pages].bits &
+        ~pagesize_mask;
+
+    /*
+     * Remove successor from tree of available runs; the coalesced run is
+     * inserted later.
+     */
+    arena_avail_tree_remove(&mRunsAvail,
+        &chunk->map[run_ind+run_pages]);
+
+    size += nrun_size;
+    run_pages = size >> pagesize_2pow;
+
+    MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
+        == nrun_size);
+    chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+        pagesize_mask);
+    chunk->map[run_ind+run_pages-1].bits = size |
+        (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+  }
+
+  /* Try to coalesce backward. */
+  if (run_ind > arena_chunk_header_npages && (chunk->map[run_ind-1].bits &
+      CHUNK_MAP_ALLOCATED) == 0) {
+    size_t prun_size = chunk->map[run_ind-1].bits & ~pagesize_mask;
+
+    run_ind -= prun_size >> pagesize_2pow;
+
+    /*
+     * Remove predecessor from tree of available runs; the coalesced run is
+     * inserted later.
+     */
+    arena_avail_tree_remove(&mRunsAvail, &chunk->map[run_ind]);
+
+    size += prun_size;
+    run_pages = size >> pagesize_2pow;
+
+    MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
+        prun_size);
+    chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+        pagesize_mask);
+    chunk->map[run_ind+run_pages-1].bits = size |
+        (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+  }
+
+  /* Insert into tree of available runs, now that coalescing is complete. */
+  arena_avail_tree_insert(&mRunsAvail, &chunk->map[run_ind]);
+
+  /* Deallocate chunk if it is now completely unused. */
+  if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
+      CHUNK_MAP_ALLOCATED)) == arena_maxclass) {
+    DeallocChunk(chunk);
+  }
+
+  /* Enforce mMaxDirty. */
+  if (mNumDirty > mMaxDirty) {
+    Purge(false);
+  }
 }
 
 static void
 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     size_t oldsize, size_t newsize)
 {
 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
 	size_t head_npages = (oldsize - newsize) >> pagesize_2pow;
 
 	MOZ_ASSERT(oldsize > newsize);
 
 	/*
-	 * Update the chunk map so that arena_run_dalloc() can treat the
+	 * Update the chunk map so that arena_t::DallocRun() can treat the
 	 * leading run as separately allocated.
 	 */
 	chunk->map[pageind].bits = (oldsize - newsize) | CHUNK_MAP_LARGE |
 	    CHUNK_MAP_ALLOCATED;
 	chunk->map[pageind+head_npages].bits = newsize | CHUNK_MAP_LARGE |
 	    CHUNK_MAP_ALLOCATED;
 
-	arena_run_dalloc(arena, run, false);
+	arena->DallocRun(run, false);
 }
 
 static void
 arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     size_t oldsize, size_t newsize, bool dirty)
 {
 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
 	size_t npages = newsize >> pagesize_2pow;
 
 	MOZ_ASSERT(oldsize > newsize);
 
 	/*
-	 * Update the chunk map so that arena_run_dalloc() can treat the
+	 * Update the chunk map so that arena_t::DallocRun() can treat the
 	 * trailing run as separately allocated.
 	 */
 	chunk->map[pageind].bits = newsize | CHUNK_MAP_LARGE |
 	    CHUNK_MAP_ALLOCATED;
 	chunk->map[pageind+npages].bits = (oldsize - newsize) | CHUNK_MAP_LARGE
 	    | CHUNK_MAP_ALLOCATED;
 
-	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
-	    dirty);
+	arena->DallocRun((arena_run_t*)(uintptr_t(run) + newsize), dirty);
 }
 
 static arena_run_t *
 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 {
 	arena_chunk_map_t *mapelm;
 	arena_run_t *run;
 	unsigned i, remainder;
@@ -3752,17 +3751,17 @@ arena_dalloc_small(arena_t *arena, arena
 			 */
 			MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
 				run_mapelm);
 			arena_run_tree_remove(&bin->runs, run_mapelm);
 		}
 #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
 		run->magic = 0;
 #endif
-		arena_run_dalloc(arena, run, true);
+		arena->DallocRun(run, true);
 		bin->stats.curruns--;
 	} else if (run->nfree == 1 && run != bin->runcur) {
 		/*
 		 * Make sure that bin->runcur always refers to the lowest
 		 * non-full run, if one exists.
 		 */
 		if (!bin->runcur)
 			bin->runcur = run;
@@ -3803,17 +3802,17 @@ arena_dalloc_large(arena_t *arena, arena
 {
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
 	    pagesize_2pow;
 	size_t size = chunk->map[pageind].bits & ~pagesize_mask;
 
 	memset(ptr, kAllocPoison, size);
 	arena->mStats.allocated_large -= size;
 
-	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
+	arena->DallocRun((arena_run_t*)ptr, true);
 }
 
 static inline void
 arena_dalloc(void *ptr, size_t offset)
 {
 	arena_chunk_t *chunk;
 	arena_t *arena;
 	size_t pageind;