Bug 1401099 - Move arena_chunk_init to a method of arena_t. r?njn draft
author Mike Hommey <mh+mozilla@glandium.org>
Fri, 15 Sep 2017 17:43:36 +0900
changeset 667404 2ec55e9253db34dc80125071467a14f4379bea3e
parent 667403 28ac2d3fa60e0d793f47aaff2db1e0ca01a97b57
child 667405 0e6fef6da5b288e984bed1c4003b314e67051282
push id 80694
push user bmo:mh+mozilla@glandium.org
push date Wed, 20 Sep 2017 02:07:21 +0000
reviewers njn
bugs 1401099
milestone 57.0a1
Bug 1401099 - Move arena_chunk_init to a method of arena_t. r?njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -766,16 +766,18 @@ struct arena_t {
    *       35  | 1024 |
    *       36  | 2048 |
    *   --------+------+
    */
   arena_bin_t mBins[1]; /* Dynamically sized. */
 
   bool Init();
 
+  void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
+
   void Purge(bool aAll);
 
   void HardPurge();
 };
 
 /******************************************************************************/
 /*
  * Data.
@@ -2647,71 +2649,72 @@ arena_run_split(arena_t *arena, arena_ru
 	 */
 	if (large)
 		chunk->map[run_ind].bits |= size;
 
 	if (chunk->ndirty == 0 && old_ndirty > 0)
 		arena_chunk_tree_dirty_remove(&arena->mChunksDirty, chunk);
 }
 
-static void
-arena_chunk_init(arena_t *arena, arena_chunk_t *chunk, bool zeroed)
+void
+arena_t::InitChunk(arena_chunk_t* aChunk, bool aZeroed)
 {
-	size_t i;
-	/* WARNING: The following relies on !zeroed meaning "used to be an arena
+  size_t i;
+  /* WARNING: The following relies on !aZeroed meaning "used to be an arena
          * chunk".
          * When the chunk we're initializing as an arena chunk is zeroed, we
          * mark all runs as decommitted and zeroed.
          * When it is not, which we can assume means it's a recycled arena chunk,
          * all it can contain is an arena chunk header (which we're overwriting),
          * and zeroed or poisoned memory (because a recycled arena chunk will
          * have been emptied before being recycled). In that case, we can get
          * away with reusing the chunk as-is, marking all runs as madvised.
          */
-	size_t flags = zeroed ? CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED
-	                      : CHUNK_MAP_MADVISED;
-
-	arena->mStats.mapped += chunksize;
-
-	chunk->arena = arena;
-
-	/*
-	 * Claim that no pages are in use, since the header is merely overhead.
-	 */
-	chunk->ndirty = 0;
-
-	/* Initialize the map to contain one maximal free untouched run. */
+  size_t flags = aZeroed ? CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED
+                         : CHUNK_MAP_MADVISED;
+
+  mStats.mapped += chunksize;
+
+  aChunk->arena = this;
+
+  /*
+   * Claim that no pages are in use, since the header is merely overhead.
+   */
+  aChunk->ndirty = 0;
+
+  /* Initialize the map to contain one maximal free untouched run. */
 #ifdef MALLOC_DECOMMIT
-	arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-	                   (arena_chunk_header_npages << pagesize_2pow));
+  arena_run_t* run = (arena_run_t*)(uintptr_t(aChunk) +
+                     (arena_chunk_header_npages << pagesize_2pow));
 #endif
 
-	for (i = 0; i < arena_chunk_header_npages; i++)
-		chunk->map[i].bits = 0;
-	chunk->map[i].bits = arena_maxclass | flags;
-	for (i++; i < chunk_npages-1; i++) {
-		chunk->map[i].bits = flags;
-	}
-	chunk->map[chunk_npages-1].bits = arena_maxclass | flags;
+  for (i = 0; i < arena_chunk_header_npages; i++) {
+    aChunk->map[i].bits = 0;
+  }
+  aChunk->map[i].bits = arena_maxclass | flags;
+  for (i++; i < chunk_npages-1; i++) {
+    aChunk->map[i].bits = flags;
+  }
+  aChunk->map[chunk_npages-1].bits = arena_maxclass | flags;
 
 #ifdef MALLOC_DECOMMIT
-	/*
-	 * Start out decommitted, in order to force a closer correspondence
-	 * between dirty pages and committed untouched pages.
-	 */
-	pages_decommit(run, arena_maxclass);
+  /*
+   * Start out decommitted, in order to force a closer correspondence
+   * between dirty pages and committed untouched pages.
+   */
+  pages_decommit(run, arena_maxclass);
 #endif
-	arena->mStats.committed += arena_chunk_header_npages;
-
-	/* Insert the run into the tree of available runs. */
-	arena_avail_tree_insert(&arena->mRunsAvail,
-	    &chunk->map[arena_chunk_header_npages]);
+  mStats.committed += arena_chunk_header_npages;
+
+  /* Insert the run into the tree of available runs. */
+  arena_avail_tree_insert(&mRunsAvail,
+      &aChunk->map[arena_chunk_header_npages]);
 
 #ifdef MALLOC_DOUBLE_PURGE
-	new (&chunk->chunks_madvised_elem) mozilla::DoublyLinkedListElement<arena_chunk_t>();
+  new (&aChunk->chunks_madvised_elem) mozilla::DoublyLinkedListElement<arena_chunk_t>();
 #endif
 }
 
 static void
 arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 {
 
 	if (arena->mSpare) {
@@ -2789,17 +2792,17 @@ arena_run_alloc(arena_t *arena, arena_bi
 	 */
 	{
 		bool zeroed;
 		arena_chunk_t *chunk = (arena_chunk_t *)
 		    chunk_alloc(chunksize, chunksize, false, &zeroed);
 		if (!chunk)
 			return nullptr;
 
-		arena_chunk_init(arena, chunk, zeroed);
+		arena->InitChunk(chunk, zeroed);
 		run = (arena_run_t *)((uintptr_t)chunk +
 		    (arena_chunk_header_npages << pagesize_2pow));
 	}
 	/* Update page map. */
 	arena_run_split(arena, run, size, large, zero);
 	return (run);
 }
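
For reference, the map layout that InitChunk establishes can be illustrated with a
small standalone sketch. This is not mozjemalloc code: kHeaderPages, kChunkPages,
kMaxClass and the flag constants below are hypothetical stand-ins for
arena_chunk_header_npages, chunk_npages, arena_maxclass and the CHUNK_MAP_* bits,
scaled down so the whole map fits on one line. The point is the pattern the loops
above produce: header pages get no flags, the first and last pages of the single
maximal free run record its size alongside the flags, and interior pages carry only
the flags.

#include <cstddef>
#include <cstdio>
#include <initializer_list>
#include <vector>

// Hypothetical, scaled-down stand-ins for the real mozjemalloc constants.
constexpr size_t kHeaderPages = 1;
constexpr size_t kChunkPages  = 8;
constexpr size_t kMaxClass    = 0x7000;  // size of the single maximal free run
constexpr size_t kDecommitted = 0x20;
constexpr size_t kZeroed      = 0x40;
constexpr size_t kMadvised    = 0x80;

int main() {
  for (bool zeroed : {true, false}) {
    // Same flag choice as InitChunk: a freshly mapped (zeroed) chunk starts out
    // decommitted and zeroed; a recycled chunk is treated as madvised.
    size_t flags = zeroed ? (kDecommitted | kZeroed) : kMadvised;
    std::vector<size_t> map(kChunkPages);

    size_t i = 0;
    for (; i < kHeaderPages; i++) {
      map[i] = 0;                              // header pages: pure overhead
    }
    map[i] = kMaxClass | flags;                // first run page records the run size
    for (i++; i < kChunkPages - 1; i++) {
      map[i] = flags;                          // interior pages carry only the flags
    }
    map[kChunkPages - 1] = kMaxClass | flags;  // last page mirrors the run size

    std::printf("zeroed=%d:", zeroed);
    for (size_t bits : map) {
      std::printf(" %#6zx", bits);
    }
    std::printf("\n");
  }
  return 0;
}

Reading the output, map[0] stays 0 for the header page while the run size shows up
at both ends of the free run; keeping the size in both the first and last map
entries is what lets the deallocation path coalesce adjacent free runs from either
side.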