Bug 1401099 - Move arena_new to a method of arena_t. r?njn
author Mike Hommey <mh+mozilla@glandium.org>
Fri, 15 Sep 2017 17:38:58 +0900
changeset 667403 28ac2d3fa60e0d793f47aaff2db1e0ca01a97b57
parent 667402 008caca6121af3deb15e16a95a20e765dc487e9f
child 667404 2ec55e9253db34dc80125071467a14f4379bea3e
push id 80694
push user bmo:mh+mozilla@glandium.org
push date Wed, 20 Sep 2017 02:07:21 +0000
reviewers njn
bugs 1401099
milestone 57.0a1
Bug 1401099 - Move arena_new to a method of arena_t. r?njn
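The conversion follows the usual free-function-to-method pattern: the explicit arena_t* argument disappears, the body accesses the members directly, and the "true means failure" return convention is kept so the caller in arenas_extend() is unchanged apart from the call syntax. A minimal, self-contained sketch of that pattern (hypothetical Widget type, not the actual mozjemalloc code):

  #include <cstdio>

  struct Widget {
    int mCount;
    bool mReady;

    // Replaces a free `static bool widget_new(Widget* w)`; like
    // arena_t::Init(), returns true on failure, false on success.
    bool Init();
  };

  bool
  Widget::Init()
  {
    mCount = 0;     // was: w->mCount = 0;
    mReady = true;  // was: w->mReady = true;
    return false;   // false == success
  }

  int
  main()
  {
    Widget w;
    if (w.Init()) { // was: if (widget_new(&w)) {
      return 1;
    }
    printf("count=%d ready=%d\n", w.mCount, w.mReady);
    return 0;
  }
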
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -764,16 +764,18 @@ struct arena_t {
    *       34  |  512 |
    *   --------+------+
    *       35  | 1024 |
    *       36  | 2048 |
    *   --------+------+
    */
   arena_bin_t mBins[1]; /* Dynamically sized. */
 
+  bool Init();
+
   void Purge(bool aAll);
 
   void HardPurge();
 };
 
 /******************************************************************************/
 /*
  * Data.
@@ -4014,89 +4016,89 @@ iralloc(void *ptr, size_t size)
 	oldsize = isalloc(ptr);
 
 	if (size <= arena_maxclass)
 		return (arena_ralloc(ptr, size, oldsize));
 	else
 		return (huge_ralloc(ptr, size, oldsize));
 }
 
-static bool
-arena_new(arena_t *arena)
+bool
+arena_t::Init()
 {
-	unsigned i;
-	arena_bin_t *bin;
-	size_t prev_run_size;
-
-	if (malloc_spin_init(&arena->mLock))
-		return (true);
-
-	memset(&arena->mStats, 0, sizeof(arena_stats_t));
-
-	/* Initialize chunks. */
-	arena_chunk_tree_dirty_new(&arena->mChunksDirty);
+  unsigned i;
+  arena_bin_t* bin;
+  size_t prev_run_size;
+
+  if (malloc_spin_init(&mLock))
+    return true;
+
+  memset(&mStats, 0, sizeof(arena_stats_t));
+
+  /* Initialize chunks. */
+  arena_chunk_tree_dirty_new(&mChunksDirty);
 #ifdef MALLOC_DOUBLE_PURGE
-	new (&arena->mChunksMAdvised) mozilla::DoublyLinkedList<arena_chunk_t>();
+  new (&mChunksMAdvised) mozilla::DoublyLinkedList<arena_chunk_t>();
 #endif
-	arena->mSpare = nullptr;
-
-	arena->mNumDirty = 0;
-	// Reduce the maximum amount of dirty pages we allow to be kept on
-	// thread local arenas. TODO: make this more flexible.
-	arena->mMaxDirty = opt_dirty_max >> 3;
-
-	arena_avail_tree_new(&arena->mRunsAvail);
-
-	/* Initialize bins. */
-	prev_run_size = pagesize;
-
-	/* (2^n)-spaced tiny bins. */
-	for (i = 0; i < ntbins; i++) {
-		bin = &arena->mBins[i];
-		bin->runcur = nullptr;
-		arena_run_tree_new(&bin->runs);
-
-		bin->reg_size = (1ULL << (TINY_MIN_2POW + i));
-
-		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-	}
-
-	/* Quantum-spaced bins. */
-	for (; i < ntbins + nqbins; i++) {
-		bin = &arena->mBins[i];
-		bin->runcur = nullptr;
-		arena_run_tree_new(&bin->runs);
-
-		bin->reg_size = quantum * (i - ntbins + 1);
-
-		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-	}
-
-	/* (2^n)-spaced sub-page bins. */
-	for (; i < ntbins + nqbins + nsbins; i++) {
-		bin = &arena->mBins[i];
-		bin->runcur = nullptr;
-		arena_run_tree_new(&bin->runs);
-
-		bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
-
-		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-	}
+  mSpare = nullptr;
+
+  mNumDirty = 0;
+  // Reduce the maximum amount of dirty pages we allow to be kept on
+  // thread local arenas. TODO: make this more flexible.
+  mMaxDirty = opt_dirty_max >> 3;
+
+  arena_avail_tree_new(&mRunsAvail);
+
+  /* Initialize bins. */
+  prev_run_size = pagesize;
+
+  /* (2^n)-spaced tiny bins. */
+  for (i = 0; i < ntbins; i++) {
+    bin = &mBins[i];
+    bin->runcur = nullptr;
+    arena_run_tree_new(&bin->runs);
+
+    bin->reg_size = (1ULL << (TINY_MIN_2POW + i));
+
+    prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+    memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+  }
+
+  /* Quantum-spaced bins. */
+  for (; i < ntbins + nqbins; i++) {
+    bin = &mBins[i];
+    bin->runcur = nullptr;
+    arena_run_tree_new(&bin->runs);
+
+    bin->reg_size = quantum * (i - ntbins + 1);
+
+    prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+    memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+  }
+
+  /* (2^n)-spaced sub-page bins. */
+  for (; i < ntbins + nqbins + nsbins; i++) {
+    bin = &mBins[i];
+    bin->runcur = nullptr;
+    arena_run_tree_new(&bin->runs);
+
+    bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
+
+    prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+    memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+  }
 
 #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
-	arena->mMagic = ARENA_MAGIC;
+  mMagic = ARENA_MAGIC;
 #endif
 
-	return (false);
+  return false;
 }
 
 static inline arena_t *
 arenas_fallback()
 {
 	/* Only reached if there is an OOM error. */
 
 	/*
@@ -4122,17 +4124,17 @@ arenas_extend()
 	 */
 	const size_t arenas_growth = 16;
 	arena_t *ret;
 
 
 	/* Allocate enough space for trailing bins. */
 	ret = (arena_t *)base_alloc(sizeof(arena_t)
 	    + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
-	if (!ret || arena_new(ret)) {
+	if (!ret || ret->Init()) {
 		return arenas_fallback();
         }
 
 	malloc_spin_lock(&arenas_lock);
 
 	/* Allocate and initialize arenas. */
 	if (narenas % arenas_growth == 0) {
 		size_t max_arenas = ((narenas + arenas_growth) / arenas_growth) * arenas_growth;
@@ -4627,17 +4629,17 @@ MALLOC_OUT:
    */
   arenas_extend();
   if (!arenas || !arenas[0]) {
 #ifndef XP_WIN
     malloc_mutex_unlock(&init_lock);
 #endif
     return true;
   }
-  /* arena_new() sets this to a lower value for thread local arenas;
+  /* arena_t::Init() sets this to a lower value for thread local arenas;
    * reset to the default value for the main arenas */
   arenas[0]->mMaxDirty = opt_dirty_max;
 
 #ifndef NO_TLS
   /*
    * Assign the initial arena to the initial thread.
    */
   thread_arena.set(arenas[0]);