Bug 1418153 - Move arena selection one level up the call stack. r?njn draft
author       Mike Hommey <mh+mozilla@glandium.org>
date         Tue, 14 Nov 2017 08:00:17 +0900
changeset    699639 fb8eb695e78bb88e999e9a35ae46c3ac8b4eb779
parent       699638 d20006670be7bfe39c83d482f54c6eb9ea77d418
child        699640 4cbc7d49bbb59a5007db3ee1ba660dbdbd54daf4
push id      89625
push user    bmo:mh+mozilla@glandium.org
push date    Fri, 17 Nov 2017 11:44:35 +0000
reviewers    njn
bugs         1418153
milestone    59.0a1
Bug 1418153 - Move arena selection one level up the call stack. r?njn

We intend to turn some of these functions into methods of the arena_t class. Moving arena selection out of them and into their callers is the first step towards that.
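As a rough sketch of the new calling convention (not part of the patch, but using the same identifiers that appear in mozjemalloc.cpp below: mArena, choose_arena, imalloc), each public entry point now resolves the arena itself instead of passing a possibly-null pointer down:

  // Before this patch, imalloc() fell back to choose_arena() when
  // aArena was null. After it, the caller resolves the arena up front
  // and imalloc() asserts that it received a non-null arena_t*.
  arena_t* arena = mArena ? mArena : choose_arena(aSize);
  void* ret = imalloc(aSize, /* zero = */ false, arena);

The null-arena fallback now lives in each BaseAllocator entry point, so imalloc, ipalloc and huge_palloc can assume a valid arena, which is what makes turning them into arena_t methods straightforward later.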
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -3076,17 +3076,17 @@ arena_t::Malloc(size_t aSize, bool aZero
                                  : MallocLarge(aSize, aZero);
 }
 
 static inline void*
 imalloc(size_t aSize, bool aZero, arena_t* aArena)
 {
   MOZ_ASSERT(aSize != 0);
 
-  aArena = aArena ? aArena : choose_arena(aSize);
+  MOZ_ASSERT(aArena);
   if (aSize <= gMaxLargeClass) {
     return aArena->Malloc(aSize, aZero);
   }
   return huge_malloc(aSize, aZero, aArena);
 }
 
 // Only handles large allocations that require more than page alignment.
 void*
@@ -3168,17 +3168,17 @@ ipalloc(size_t aAlignment, size_t aSize,
 
   // (ceil_size < aSize) protects against the combination of maximal
   // alignment and size greater than maximal alignment.
   if (ceil_size < aSize) {
     // size_t overflow.
     return nullptr;
   }
 
-  aArena = aArena ? aArena : choose_arena(aSize);
+  MOZ_ASSERT(aArena);
   if (ceil_size <= gPageSize ||
       (aAlignment <= gPageSize && ceil_size <= gMaxLargeClass)) {
     ret = aArena->Malloc(ceil_size, false);
   } else {
     size_t run_size;
 
     // We can't achieve sub-page alignment, so round up alignment
     // permanently; it makes later calculations simpler.
@@ -3875,17 +3875,18 @@ huge_palloc(size_t aSize, size_t aAlignm
   if (aZero) {
     chunk_ensure_zero(ret, csize, zeroed);
   }
 
   // Insert node into huge.
   node->mAddr = ret;
   psize = PAGE_CEILING(aSize);
   node->mSize = psize;
-  node->mArena = aArena ? aArena : choose_arena(aSize);
+  MOZ_ASSERT(aArena);
+  node->mArena = aArena;
 
   {
     MutexAutoLock lock(huge_mtx);
     huge.Insert(node);
 
     // Although we allocated space for csize bytes, we indicate that we've
     // allocated only psize bytes.
     //
@@ -3953,17 +3954,17 @@ huge_ralloc(void* aPtr, size_t aSize, si
       pages_decommit((void*)((uintptr_t)aPtr + psize), aOldSize - psize);
 
       // Update recorded size.
       MutexAutoLock lock(huge_mtx);
       key.mAddr = const_cast<void*>(aPtr);
       extent_node_t* node = huge.Search(&key);
       MOZ_ASSERT(node);
       MOZ_ASSERT(node->mSize == aOldSize);
-      MOZ_RELEASE_ASSERT(!aArena || node->mArena == aArena);
+      MOZ_RELEASE_ASSERT(node->mArena == aArena);
       huge_allocated -= aOldSize - psize;
       // No need to change huge_mapped, because we didn't (un)map anything.
       node->mSize = psize;
     } else if (psize > aOldSize) {
       if (!pages_commit((void*)((uintptr_t)aPtr + aOldSize),
                         psize - aOldSize)) {
         return nullptr;
       }
@@ -3978,17 +3979,17 @@ huge_ralloc(void* aPtr, size_t aSize, si
     if (psize > aOldSize) {
       // Update recorded size.
       extent_node_t key;
       MutexAutoLock lock(huge_mtx);
       key.mAddr = const_cast<void*>(aPtr);
       extent_node_t* node = huge.Search(&key);
       MOZ_ASSERT(node);
       MOZ_ASSERT(node->mSize == aOldSize);
-      MOZ_RELEASE_ASSERT(!aArena || node->mArena == aArena);
+      MOZ_RELEASE_ASSERT(node->mArena == aArena);
       huge_allocated += psize - aOldSize;
       // No need to change huge_mapped, because we didn't
       // (un)map anything.
       node->mSize = psize;
     }
 
     if (opt_zero && aSize > aOldSize) {
       memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
@@ -4249,27 +4250,28 @@ private:
   }
 #define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
 #include "malloc_decls.h"
 
 inline void*
 BaseAllocator::malloc(size_t aSize)
 {
   void* ret;
+  arena_t* arena;
 
   if (!malloc_init()) {
     ret = nullptr;
     goto RETURN;
   }
 
   if (aSize == 0) {
     aSize = 1;
   }
-
-  ret = imalloc(aSize, /* zero = */ false, mArena);
+  arena = mArena ? mArena : choose_arena(aSize);
+  ret = imalloc(aSize, /* zero = */ false, arena);
 
 RETURN:
   if (!ret) {
     errno = ENOMEM;
   }
 
   return ret;
 }
@@ -4285,34 +4287,36 @@ BaseAllocator::memalign(size_t aAlignmen
     return nullptr;
   }
 
   if (aSize == 0) {
     aSize = 1;
   }
 
   aAlignment = aAlignment < sizeof(void*) ? sizeof(void*) : aAlignment;
-  ret = ipalloc(aAlignment, aSize, mArena);
+  arena_t* arena = mArena ? mArena : choose_arena(aSize);
+  ret = ipalloc(aAlignment, aSize, arena);
 
   return ret;
 }
 
 inline void*
 BaseAllocator::calloc(size_t aNum, size_t aSize)
 {
   void* ret;
 
   if (malloc_init()) {
     CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aNum) * aSize;
     if (checkedSize.isValid()) {
       size_t allocSize = checkedSize.value();
       if (allocSize == 0) {
         allocSize = 1;
       }
-      ret = imalloc(allocSize, /* zero = */ true, mArena);
+      arena_t* arena = mArena ? mArena : choose_arena(allocSize);
+      ret = imalloc(allocSize, /* zero = */ true, arena);
     } else {
       ret = nullptr;
     }
   } else {
     ret = nullptr;
   }
 
   if (!ret) {
@@ -4338,17 +4342,18 @@ BaseAllocator::realloc(void* aPtr, size_
 
     if (!ret) {
       errno = ENOMEM;
     }
   } else {
     if (!malloc_init()) {
       ret = nullptr;
     } else {
-      ret = imalloc(aSize, /* zero = */ false, mArena);
+      arena_t* arena = mArena ? mArena : choose_arena(aSize);
+      ret = imalloc(aSize, /* zero = */ false, arena);
     }
 
     if (!ret) {
       errno = ENOMEM;
     }
   }
 
   return ret;