Bug 1406303 - Don't heap-allocate the global chunk radix tree. r?njn
author      Mike Hommey <mh+mozilla@glandium.org>
date        Fri, 06 Oct 2017 16:18:01 +0900
changeset   677072 fe07958614e7a715c2e35a994d4dc068b04d7302
parent      677071 153dabc7d854716d7439c2f466ba7059b70f0aec
child       735104 0ad861bac4a30bccd6de628be4c3a1fb99c59ae3
push id     83679
push user   bmo:mh+mozilla@glandium.org
push date   Mon, 09 Oct 2017 23:14:34 +0000
reviewers   njn
bugs        1406303
milestone   58.0a1
Bug 1406303 - Don't heap-allocate the global chunk radix tree. r?njn

Now that the radix tree structure has a fixed size, we can just allocate the chunk radix tree object statically.
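
For context before the diff: the change replaces a heap-allocated global created through a static Create() factory with a statically-allocated global that is initialized in place through Init(). Below is a minimal, self-contained sketch of that shape; the RadixTreeLike type, the InitGlobals() helper, and the use of plain calloc are illustrative stand-ins only and are not part of the patch (mozjemalloc uses AddressRadixTree<Bits> and its internal base_calloc).

#include <cstdlib>

// Stand-in for AddressRadixTree<Bits>; only the allocation pattern matters here.
class RadixTreeLike
{
  void** mRoot;

public:
  // Old shape: a static factory heap-allocates the object itself and returns a
  // pointer, or nullptr on failure. On partial failure the object leaks, since
  // there is no generic way to free it again.
  static RadixTreeLike* Create()
  {
    RadixTreeLike* ret = (RadixTreeLike*)calloc(1, sizeof(RadixTreeLike));
    if (!ret) {
      return nullptr;
    }
    ret->mRoot = (void**)calloc(256, sizeof(void*));
    return ret->mRoot ? ret : nullptr;
  }

  // New shape: the object itself lives in static storage, so Init() only
  // allocates the first tree level and reports success.
  bool Init()
  {
    mRoot = (void**)calloc(256, sizeof(void*));
    return mRoot != nullptr;
  }
};

// Before: static RadixTreeLike* gTree = RadixTreeLike::Create();
// After:  the object is a plain static, initialized once during startup.
static RadixTreeLike gTree;

bool InitGlobals()
{
  return gTree.Init(); // false means initialization failed, as in the patch
}
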
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -672,17 +672,17 @@ class AddressRadixTree {
   static const size_t kHeight = (Bits + kBitsPerLevel - 1) / kBitsPerLevel;
   static_assert(kBitsAtLevel1 + (kHeight - 1) * kBitsPerLevel == Bits,
                 "AddressRadixTree parameters don't work out");
 
   malloc_spinlock_t mLock;
   void** mRoot;
 
 public:
-  static AddressRadixTree<Bits>* Create();
+  bool Init();
 
   inline void* Get(void* aAddr);
 
   // Returns whether the value was properly set
   inline bool Set(void* aAddr, void* aValue);
 
   inline bool Unset(void* aAddr)
   {
@@ -1064,17 +1064,17 @@ struct ArenaTreeTrait
   }
 };
 
 /********/
 /*
  * Chunks.
  */
 
-static AddressRadixTree<(SIZEOF_PTR << 3) - CHUNK_2POW_DEFAULT>* gChunkRTree;
+static AddressRadixTree<(SIZEOF_PTR << 3) - CHUNK_2POW_DEFAULT> gChunkRTree;
 
 /* Protects chunk-related data structures. */
 static malloc_mutex_t	chunks_mtx;
 
 /*
  * Trees of chunks that were previously allocated (trees differ only in node
  * ordering).  These are used when allocating chunks, in an attempt to re-use
  * address space.  Depending on function, different tree orderings are needed,
@@ -1745,35 +1745,23 @@ pages_copy(void *dest, const void *src, 
 	MOZ_ASSERT((void *)((uintptr_t)src & ~pagesize_mask) == src);
 
 	vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
 	    (vm_address_t)dest);
 }
 #endif
 
 template <size_t Bits>
-AddressRadixTree<Bits>*
-AddressRadixTree<Bits>::Create()
+bool
+AddressRadixTree<Bits>::Init()
 {
-  AddressRadixTree<Bits>* ret;
-
-  ret = (AddressRadixTree<Bits>*)base_calloc(1, sizeof(AddressRadixTree<Bits>));
-  if (!ret) {
-    return nullptr;
-  }
-
-  malloc_spin_init(&ret->mLock);
-
-  ret->mRoot = (void**)base_calloc(1 << kBitsAtLevel1, sizeof(void*));
-  if (!ret->mRoot) {
-    // We leak the rtree here, since there's no generic base deallocation.
-    return nullptr;
-  }
-
-  return ret;
+  malloc_spin_init(&mLock);
+
+  mRoot = (void**)base_calloc(1 << kBitsAtLevel1, sizeof(void*));
+  return mRoot;
 }
 
 template <size_t Bits>
 void**
 AddressRadixTree<Bits>::GetSlot(void* aKey, bool aCreate)
 {
   uintptr_t key = reinterpret_cast<uintptr_t>(aKey);
   uintptr_t subkey;
@@ -2141,17 +2129,17 @@ chunk_alloc(size_t size, size_t alignmen
 		goto RETURN;
 	}
 
 	/* All strategies for allocation failed. */
 	ret = nullptr;
 RETURN:
 
 	if (ret && base == false) {
-		if (!gChunkRTree->Set(ret, ret)) {
+		if (!gChunkRTree.Set(ret, ret)) {
 			chunk_dealloc(ret, size, UNKNOWN_CHUNK);
 			return nullptr;
 		}
 	}
 
 	MOZ_ASSERT(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
 }
@@ -2273,17 +2261,17 @@ static void
 chunk_dealloc(void *chunk, size_t size, ChunkType type)
 {
 
 	MOZ_ASSERT(chunk);
 	MOZ_ASSERT(CHUNK_ADDR2BASE(chunk) == chunk);
 	MOZ_ASSERT(size != 0);
 	MOZ_ASSERT((size & chunksize_mask) == 0);
 
-	gChunkRTree->Unset(chunk);
+	gChunkRTree.Unset(chunk);
 
 	if (CAN_RECYCLE(size)) {
 		size_t recycled_so_far = load_acquire_z(&recycled_size);
 		// In case some race condition put us above the limit.
 		if (recycled_so_far < recycle_limit) {
 			size_t recycle_remaining = recycle_limit - recycled_so_far;
 			size_t to_recycle;
 			if (size > recycle_remaining) {
@@ -3475,17 +3463,17 @@ isalloc_validate(const void* ptr)
     return 0;
   }
 
   arena_chunk_t* chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(ptr);
   if (!chunk) {
     return 0;
   }
 
-  if (!gChunkRTree->Get(chunk)) {
+  if (!gChunkRTree.Get(chunk)) {
     return 0;
   }
 
   if (chunk != ptr) {
     MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
     return arena_salloc(ptr);
   } else {
     size_t ret;
@@ -3563,17 +3551,17 @@ MozJemalloc::jemalloc_ptr_info(const voi
     *aInfo = { TagLiveHuge, node->addr, node->size };
   }
   malloc_mutex_unlock(&huge_mtx);
   if (node) {
     return;
   }
 
   // It's not a huge allocation. Check if we have a known chunk.
-  if (!gChunkRTree->Get(chunk)) {
+  if (!gChunkRTree.Get(chunk)) {
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
   }
 
   MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
 
   // Get the page number within the chunk.
   size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> pagesize_2pow);
@@ -4503,18 +4491,17 @@ MALLOC_OUT:
    * reset to the default value for the main arena. */
   gMainArena->mMaxDirty = opt_dirty_max;
 
   /*
    * Assign the initial arena to the initial thread.
    */
   thread_arena.set(gMainArena);
 
-  gChunkRTree = AddressRadixTree<(SIZEOF_PTR << 3) - CHUNK_2POW_DEFAULT>::Create();
-  if (!gChunkRTree) {
+  if (!gChunkRTree.Init()) {
     return true;
   }
 
   malloc_initialized = true;
 
 #if !defined(XP_WIN) && !defined(XP_DARWIN)
   /* Prevent potential deadlock on malloc locks after fork. */
   pthread_atfork(_malloc_prefork, _malloc_postfork_parent, _malloc_postfork_child);