Bug 1414155 - Factor out size class logic for tiny, quantum and sub-page classes. r?njn draft
author Mike Hommey <mh+mozilla@glandium.org>
Fri, 03 Nov 2017 08:53:34 +0900
changeset 693241 14dfed6a24090042718bb213d1fd8878a9169e75
parent 693240 bab4c5597518a6e1240f1b517224b4b8658a2f60
child 693242 20588eebdfb5223a3d131c1eaa71f4afebd3de10
push id 87735
push user bmo:mh+mozilla@glandium.org
push date Sat, 04 Nov 2017 22:08:08 +0000
reviewers njn
bugs 1414155
milestone 58.0a1
Bug 1414155 - Factor out size class logic for tiny, quantum and sub-page classes. r?njn

We create a new helper class that rounds up allocation sizes and categorizes them. Compilers are smart enough to elide what they don't need; in malloc_good_size, for example, they elide the code related to the class type enum.
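
For illustration, here is a minimal standalone sketch of the rounding behavior the new helper implements. The constants (kTinyMin2Pow, kQuantum, kSmallMin, kSmallMax, kBinMaxClass) are stand-in values chosen for this example; in mozjemalloc.cpp, small_min, small_max and bin_maxclass derive from the quantum and the page size. Centralizing the rounding in one class is what lets MallocSmall, arena_ralloc, the arena constructor and malloc_good_size share a single definition of the class boundaries.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdio>

// Stand-in constants for this sketch only; the real values are derived
// from QUANTUM_2POW_MIN and the page size in mozjemalloc.cpp.
static const size_t kTinyMin2Pow = 2;             // smallest class: 4 bytes
static const size_t kQuantum = 16;                // quantum spacing
static const size_t kSmallMin = kQuantum / 2 + 1; // plays the role of small_min
static const size_t kSmallMax = 512;              // plays the role of small_max
static const size_t kBinMaxClass = 2048;          // plays the role of bin_maxclass

// Round up to the next power of two (returns 1 for aSize == 0).
static size_t RoundUpPow2(size_t aSize)
{
  size_t pow2 = 1;
  while (pow2 < aSize) {
    pow2 <<= 1;
  }
  return pow2;
}

class SizeClass
{
public:
  enum ClassType
  {
    Tiny,    // (2^n)-spaced, below kSmallMin
    Quantum, // quantum-spaced, up to kSmallMax
    SubPage, // (2^n)-spaced, up to kBinMaxClass
  };

  explicit SizeClass(size_t aSize)
  {
    if (aSize < kSmallMin) {
      mType = Tiny;
      mSize = std::max(RoundUpPow2(aSize), size_t(1) << kTinyMin2Pow);
    } else if (aSize <= kSmallMax) {
      mType = Quantum;
      // Equivalent of QUANTUM_CEILING: round up to a multiple of the quantum.
      mSize = (aSize + kQuantum - 1) & ~(kQuantum - 1);
    } else {
      assert(aSize <= kBinMaxClass);
      mType = SubPage;
      mSize = RoundUpPow2(aSize);
    }
  }

  size_t Size() const { return mSize; }
  ClassType Type() const { return mType; }

private:
  ClassType mType;
  size_t mSize;
};

int main()
{
  // With the stand-in constants: 3 rounds up to 4 (Tiny), 20 to 32
  // (Quantum), and 600 to 1024 (SubPage).
  printf("%zu %zu %zu\n", SizeClass(3).Size(), SizeClass(20).Size(),
         SizeClass(600).Size());
  return 0;
}
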
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -569,16 +569,60 @@ struct ExtentTreeBoundsTrait : public Ex
     if (node_addr <= key_addr && key_addr < node_addr + node_size) {
       return 0;
     }
 
     return (key_addr > node_addr) - (key_addr < node_addr);
   }
 };
 
+// Describe the size classes to which allocations are rounded up.
+// TODO: add large and huge types when the arena allocation code
+// changes in a way that allows it to be beneficial.
+class SizeClass
+{
+public:
+  enum ClassType
+  {
+    Tiny,
+    Quantum,
+    SubPage,
+  };
+
+  explicit inline SizeClass(size_t aSize)
+  {
+    if (aSize < small_min) {
+      mType = Tiny;
+      mSize = std::max(RoundUpPow2(aSize), size_t(1U << TINY_MIN_2POW));
+    } else if (aSize <= small_max) {
+      mType = Quantum;
+      mSize = QUANTUM_CEILING(aSize);
+    } else if (aSize <= bin_maxclass) {
+      mType = SubPage;
+      mSize = RoundUpPow2(aSize);
+    } else {
+      MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid size");
+    }
+  }
+
+  SizeClass& operator=(const SizeClass& aOther) = default;
+
+  bool operator==(const SizeClass& aOther) const { return aOther.mSize == mSize; }
+
+  size_t Size() const { return mSize; }
+
+  ClassType Type() const { return mType; }
+
+  SizeClass Next() const { return SizeClass(mSize + 1); }
+
+private:
+  ClassType mType;
+  size_t mSize;
+};
+
 // ***************************************************************************
 // Radix tree data structures.
 //
 // The number of bits passed to the template is the number of significant bits
 // in an address to do a radix lookup with.
 //
 // An address is looked up by splitting it in kBitsPerLevel bit chunks, except
 // the most significant bits, where the bit chunk is kBitsAtLevel1 which can be
@@ -2979,33 +3023,32 @@ arena_bin_run_size_calc(arena_bin_t* bin
 }
 
 void*
 arena_t::MallocSmall(size_t aSize, bool aZero)
 {
   void* ret;
   arena_bin_t* bin;
   arena_run_t* run;
-
-  if (aSize < small_min) {
-    // Tiny.
-    aSize = RoundUpPow2(aSize);
-    if (aSize < (1U << TINY_MIN_2POW)) {
-      aSize = 1U << TINY_MIN_2POW;
-    }
-    bin = &mBins[FloorLog2(aSize >> TINY_MIN_2POW)];
-  } else if (aSize <= small_max) {
-    // Quantum-spaced.
-    aSize = QUANTUM_CEILING(aSize);
-    bin = &mBins[ntbins + (aSize >> QUANTUM_2POW_MIN) - 1];
-  } else {
-    // Sub-page.
-    aSize = RoundUpPow2(aSize);
-    bin = &mBins[ntbins + nqbins +
-                 (FloorLog2(aSize >> SMALL_MAX_2POW_DEFAULT) - 1)];
+  SizeClass sizeClass(aSize);
+  aSize = sizeClass.Size();
+
+  switch (sizeClass.Type()) {
+    case SizeClass::Tiny:
+      bin = &mBins[FloorLog2(aSize >> TINY_MIN_2POW)];
+      break;
+    case SizeClass::Quantum:
+      bin = &mBins[ntbins + (aSize >> QUANTUM_2POW_MIN) - 1];
+      break;
+    case SizeClass::SubPage:
+      bin = &mBins[ntbins + nqbins +
+                   (FloorLog2(aSize >> SMALL_MAX_2POW_DEFAULT) - 1)];
+      break;
+    default:
+      MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected size class type");
   }
   MOZ_DIAGNOSTIC_ASSERT(aSize == bin->mSizeClass);
 
   {
     MutexAutoLock lock(mLock);
     if ((run = bin->mCurrentRun) && run->nfree > 0) {
       ret = MallocBinEasy(bin, run);
     } else {
@@ -3681,32 +3724,25 @@ arena_ralloc_large(void* aPtr, size_t aS
 
 static void*
 arena_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
 {
   void* ret;
   size_t copysize;
 
   // Try to avoid moving the allocation.
-  if (aSize < small_min) {
-    if (aOldSize < small_min &&
-        (RoundUpPow2(aSize) >> (TINY_MIN_2POW + 1) ==
-         RoundUpPow2(aOldSize) >> (TINY_MIN_2POW + 1))) {
-      goto IN_PLACE; // Same size class.
-    }
-  } else if (aSize <= small_max) {
-    if (aOldSize >= small_min && aOldSize <= small_max &&
-        (QUANTUM_CEILING(aSize) >> QUANTUM_2POW_MIN) ==
-          (QUANTUM_CEILING(aOldSize) >> QUANTUM_2POW_MIN)) {
-      goto IN_PLACE; // Same size class.
-    }
-  } else if (aSize <= bin_maxclass) {
-    if (aOldSize > small_max && aOldSize <= bin_maxclass &&
-        RoundUpPow2(aSize) == RoundUpPow2(aOldSize)) {
-      goto IN_PLACE; // Same size class.
+  if (aSize <= bin_maxclass) {
+    if (aOldSize <= bin_maxclass && SizeClass(aSize) == SizeClass(aOldSize)) {
+      if (aSize < aOldSize) {
+        memset(
+          (void*)(uintptr_t(aPtr) + aSize), kAllocPoison, aOldSize - aSize);
+      } else if (opt_zero && aSize > aOldSize) {
+        memset((void*)(uintptr_t(aPtr) + aOldSize), 0, aSize - aOldSize);
+      }
+      return aPtr;
     }
   } else if (aOldSize > bin_maxclass && aOldSize <= arena_maxclass) {
     MOZ_ASSERT(aSize > bin_maxclass);
     if (arena_ralloc_large(aPtr, aSize, aOldSize)) {
       return aPtr;
     }
   }
 
@@ -3726,23 +3762,16 @@ arena_ralloc(void* aPtr, size_t aSize, s
     pages_copy(ret, aPtr, copysize);
   } else
 #endif
   {
     memcpy(ret, aPtr, copysize);
   }
   idalloc(aPtr);
   return ret;
-IN_PLACE:
-  if (aSize < aOldSize) {
-    memset((void*)(uintptr_t(aPtr) + aSize), kAllocPoison, aOldSize - aSize);
-  } else if (opt_zero && aSize > aOldSize) {
-    memset((void*)(uintptr_t(aPtr) + aOldSize), 0, aSize - aOldSize);
-  }
-  return aPtr;
 }
 
 static inline void*
 iralloc(void* aPtr, size_t aSize, arena_t* aArena)
 {
   size_t oldsize;
 
   MOZ_ASSERT(aPtr);
@@ -3776,55 +3805,36 @@ arena_t::arena_t()
   // Reduce the maximum amount of dirty pages we allow to be kept on
   // thread local arenas. TODO: make this more flexible.
   mMaxDirty = opt_dirty_max >> 3;
 
   mRunsAvail.Init();
 
   // Initialize bins.
   prev_run_size = pagesize;
-
-  // (2^n)-spaced tiny bins.
-  for (i = 0; i < ntbins; i++) {
+  SizeClass sizeClass(1);
+
+  for (i = 0;; i++) {
     bin = &mBins[i];
     bin->mCurrentRun = nullptr;
     bin->mNonFullRuns.Init();
 
-    bin->mSizeClass = (1ULL << (TINY_MIN_2POW + i));
+    bin->mSizeClass = sizeClass.Size();
 
     prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
     bin->mNumRuns = 0;
-  }
-
-  // Quantum-spaced bins.
-  for (; i < ntbins + nqbins; i++) {
-    bin = &mBins[i];
-    bin->mCurrentRun = nullptr;
-    bin->mNonFullRuns.Init();
-
-    bin->mSizeClass = quantum * (i - ntbins + 1);
-
-    prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-    bin->mNumRuns = 0;
-  }
-
-  // (2^n)-spaced sub-page bins.
-  for (; i < ntbins + nqbins + nsbins; i++) {
-    bin = &mBins[i];
-    bin->mCurrentRun = nullptr;
-    bin->mNonFullRuns.Init();
-
-    bin->mSizeClass = (small_max << (i - (ntbins + nqbins) + 1));
-
-    prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-    bin->mNumRuns = 0;
-  }
+
+    // SizeClass doesn't support sizes larger than bin_maxclass for now.
+    if (sizeClass.Size() == bin_maxclass) {
+      break;
+    }
+    sizeClass = sizeClass.Next();
+  }
+  MOZ_ASSERT(i == ntbins + nqbins + nsbins - 1);
 
 #if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
   mMagic = ARENA_MAGIC;
 #endif
 }
 
 arena_t*
 ArenaCollection::CreateArena(bool aIsPrivate)
@@ -4471,34 +4481,19 @@ MozJemalloc::valloc(size_t aSize)
 // ***************************************************************************
 // Begin non-standard functions.
 
 // This was added by Mozilla for use by SQLite.
 template<>
 inline size_t
 MozJemalloc::malloc_good_size(size_t aSize)
 {
-  // This duplicates the logic in imalloc(), arena_malloc() and
-  // arena_t::MallocSmall().
-  if (aSize < small_min) {
-    // Small (tiny).
-    aSize = RoundUpPow2(aSize);
-
-    // We omit the #ifdefs from arena_t::MallocSmall() --
-    // it can be inaccurate with its size in some cases, but this
-    // function must be accurate.
-    if (aSize < (1U << TINY_MIN_2POW)) {
-      aSize = (1U << TINY_MIN_2POW);
-    }
-  } else if (aSize <= small_max) {
-    // Small (quantum-spaced).
-    aSize = QUANTUM_CEILING(aSize);
-  } else if (aSize <= bin_maxclass) {
-    // Small (sub-page).
-    aSize = RoundUpPow2(aSize);
+  if (aSize <= bin_maxclass) {
+    // Small.
+    aSize = SizeClass(aSize).Size();
   } else if (aSize <= arena_maxclass) {
     // Large.
     aSize = PAGE_CEILING(aSize);
   } else {
     // Huge.  We use PAGE_CEILING to get psize, instead of using
     // CHUNK_CEILING to get csize.  This ensures that this
     // malloc_usable_size(malloc(n)) always matches
     // malloc_good_size(n).