Bug 1414155 - Replace the constants describing the number of size classes. r?njn
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -399,22 +399,22 @@ static const size_t kQuantumMask = kQuan
static const size_t kMinQuantumClass = kMaxTinyClass * 2;
// Largest quantum-spaced size classes.
static const size_t kMaxQuantumClass = 512;
static_assert(kMaxQuantumClass % kQuantum == 0,
"kMaxQuantumClass is not a multiple of kQuantum");
-// Number of (2^n)-spaced tiny bins.
-static const unsigned ntbins =
- unsigned(LOG2(kMinQuantumClass) - LOG2(kMinTinyClass));
-
-// Number of quantum-spaced bins.
-static const unsigned nqbins = unsigned(kMaxQuantumClass / kQuantum);
+// Number of (2^n)-spaced tiny classes.
+static const size_t kNumTinyClasses =
+ LOG2(kMinQuantumClass) - LOG2(kMinTinyClass);
+
+// Number of quantum-spaced classes.
+static const size_t kNumQuantumClasses = kMaxQuantumClass / kQuantum;
// Size and alignment of memory chunks that are allocated by the OS's virtual
// memory system.
static const size_t kChunkSize = 1_MiB;
static const size_t kChunkSizeMask = kChunkSize - 1;
#ifdef MALLOC_STATIC_PAGESIZE
// VM page size. It must divide the runtime CPU page size or the code
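For concreteness, a worked example of the two new counts, under assumed values for constants defined outside this hunk (kMinTinyClass = 4, kMaxTinyClass = 8 and kQuantum = 16 are assumptions; kMaxQuantumClass = 512 is visible in the patch): kMinQuantumClass = kMaxTinyClass * 2 = 16, so kNumTinyClasses = log2(16) - log2(4) = 2, covering the tiny classes 4 and 8; kNumQuantumClasses = 512 / 16 = 32, covering 16, 32, ..., 512.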
@@ -452,33 +452,33 @@ static size_t gPageSize;
{
#define END_GLOBALS }
#define DEFINE_GLOBAL(type)
#define GLOBAL_LOG2 FloorLog2
#define GLOBAL_ASSERT MOZ_RELEASE_ASSERT
#endif
DECLARE_GLOBAL(size_t, gMaxSubPageClass)
-DECLARE_GLOBAL(uint8_t, nsbins)
+DECLARE_GLOBAL(uint8_t, gNumSubPageClasses)
DECLARE_GLOBAL(uint8_t, gPageSize2Pow)
DECLARE_GLOBAL(size_t, gPageSizeMask)
DECLARE_GLOBAL(size_t, gChunkNumPages)
DECLARE_GLOBAL(size_t, gChunkHeaderNumPages)
DECLARE_GLOBAL(size_t, gMaxLargeClass)
DEFINE_GLOBALS
// Largest sub-page size class.
DEFINE_GLOBAL(size_t) gMaxSubPageClass = gPageSize / 2;
// Max size class for bins.
#define gMaxBinClass gMaxSubPageClass
-// Number of (2^n)-spaced sub-page bins.
+// Number of (2^n)-spaced sub-page classes.
DEFINE_GLOBAL(uint8_t)
-nsbins = GLOBAL_LOG2(gMaxSubPageClass) - LOG2(kMaxQuantumClass);
+gNumSubPageClasses = GLOBAL_LOG2(gMaxSubPageClass) - LOG2(kMaxQuantumClass);
DEFINE_GLOBAL(uint8_t) gPageSize2Pow = GLOBAL_LOG2(gPageSize);
DEFINE_GLOBAL(size_t) gPageSizeMask = gPageSize - 1;
// Number of pages in a chunk.
DEFINE_GLOBAL(size_t) gChunkNumPages = kChunkSize >> gPageSize2Pow;
// Number of pages necessary for a chunk header.
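For concreteness, assuming a 4 KiB runtime page size: gMaxSubPageClass = 4096 / 2 = 2048, so gNumSubPageClasses = log2(2048) - log2(512) = 11 - 9 = 2, covering the sub-page classes 1024 and 2048.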
@@ -1049,18 +1049,19 @@ public:
void* operator new(size_t aCount, const fallible_t&)
#if !defined(_MSC_VER) || defined(_CPPUNWIND)
noexcept
#endif
{
MOZ_ASSERT(aCount == sizeof(arena_t));
// Allocate enough space for trailing bins.
- return base_alloc(aCount +
- (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
+ return base_alloc(
+ aCount + (sizeof(arena_bin_t) * (kNumTinyClasses + kNumQuantumClasses +
+ gNumSubPageClasses - 1)));
}
void operator delete(void*) = delete;
};
struct ArenaTreeTrait
{
static RedBlackTreeNode<arena_t>& GetTreeNode(arena_t* aThis)
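The operator new overload above over-allocates so that the bins array at the end of arena_t can hold every tiny, quantum and sub-page bin. A minimal sketch of that trailing-array idiom, with hypothetical names; the "- 1" assumes the struct declares one inline array element:

    // Minimal sketch of the trailing-array idiom; names are hypothetical.
    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct Bin {
      std::size_t mSizeClass;
    };

    struct Arena {
      std::size_t mOtherFields;
      Bin mBins[1];  // Dynamically sized: extra Bins live past the struct.
    };

    Arena* NewArena(std::size_t aNumBins) {
      // One Bin is already counted in sizeof(Arena), so append only
      // aNumBins - 1 extra elements to the same block.
      void* p = std::malloc(sizeof(Arena) + sizeof(Bin) * (aNumBins - 1));
      return p ? new (p) Arena() : nullptr;
    }

    int main() {
      Arena* a = NewArena(36);  // e.g. tiny + quantum + sub-page bins
      std::free(a);
    }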
@@ -2338,17 +2339,18 @@ arena_run_reg_dalloc(arena_run_t* run, a
SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
};
// clang-format on
unsigned diff, regind, elm, bit;
MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
- static_assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3 >= nqbins,
+ static_assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3 >=
+ kNumQuantumClasses,
"size_invs doesn't have enough values");
// Avoid doing division with a variable divisor if possible. Using
// actual division here can reduce allocator throughput by over 20%!
diff =
(unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->mRunFirstRegionOffset);
if ((size & (size - 1)) == 0) {
// log2_table allows fast division of a power of two in the
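The size_invs table exists to turn the division of diff by the region size into a multiply-and-shift: each entry is a precomputed reciprocal of a size class, scaled by a power of two. A generic sketch of that scheme (not mozjemalloc's exact SIZE_INV macro; kShift = 21 is an illustrative choice); the result is exact whenever x is a multiple of d and both stay small:

    // Generic sketch of division-by-multiplication, the scheme behind the
    // size_invs table; this is not mozjemalloc's exact SIZE_INV macro.
    #include <cassert>
    #include <cstdint>

    constexpr unsigned kShift = 21;  // illustrative scale, an assumption

    // Scaled reciprocal of d: (2^kShift / d) + 1.
    constexpr uint32_t Reciprocal(uint32_t d) {
      return (1u << kShift) / d + 1;
    }

    // Computes x / d without a runtime division, given inv = Reciprocal(d).
    inline uint32_t DivBy(uint32_t x, uint32_t inv) {
      return (x * inv) >> kShift;
    }

    int main() {
      // Exact for every x that is a multiple of d, over the small ranges a
      // region index computation needs.
      for (uint32_t d = 3; d <= 31; d += 2) {
        uint32_t inv = Reciprocal(d);
        for (uint32_t x = 0; x < 4096; x += d) {
          assert(DivBy(x, inv) == x / d);
        }
      }
    }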
@@ -3035,21 +3037,21 @@ arena_t::MallocSmall(size_t aSize, bool
SizeClass sizeClass(aSize);
aSize = sizeClass.Size();
switch (sizeClass.Type()) {
case SizeClass::Tiny:
bin = &mBins[FloorLog2(aSize / kMinTinyClass)];
break;
case SizeClass::Quantum:
- bin = &mBins[ntbins + (aSize / kQuantum) - 1];
+ bin = &mBins[kNumTinyClasses + (aSize / kQuantum) - 1];
break;
case SizeClass::SubPage:
- bin =
- &mBins[ntbins + nqbins + (FloorLog2(aSize / kMaxQuantumClass) - 1)];
+ bin = &mBins[kNumTinyClasses + kNumQuantumClasses +
+ (FloorLog2(aSize / kMaxQuantumClass) - 1)];
break;
default:
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected size class type");
}
MOZ_DIAGNOSTIC_ASSERT(aSize == bin->mSizeClass);
{
MutexAutoLock lock(mLock);
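The switch above encodes the layout of mBins: tiny classes occupy indices [0, kNumTinyClasses), quantum classes the next kNumQuantumClasses slots, and sub-page classes the rest. A self-contained restatement with the assumed example values from earlier (only kMaxQuantumClass = 512 comes from the patch):

    // Self-contained sketch of the bin index computation. kMinTinyClass,
    // kMaxTinyClass and kQuantum are assumptions defined outside this hunk.
    #include <cstddef>

    constexpr std::size_t kMinTinyClass = 4;       // assumption
    constexpr std::size_t kMaxTinyClass = 8;       // assumption
    constexpr std::size_t kQuantum = 16;           // assumption
    constexpr std::size_t kMaxQuantumClass = 512;  // from the patch
    constexpr std::size_t kNumTinyClasses = 2;     // log2(16) - log2(4)
    constexpr std::size_t kNumQuantumClasses = kMaxQuantumClass / kQuantum;

    constexpr std::size_t FloorLog2(std::size_t n) {
      return n <= 1 ? 0 : 1 + FloorLog2(n / 2);
    }

    constexpr std::size_t BinIndex(std::size_t aSize) {
      return aSize <= kMaxTinyClass
               ? FloorLog2(aSize / kMinTinyClass)
               : aSize <= kMaxQuantumClass
                   ? kNumTinyClasses + (aSize / kQuantum) - 1
                   : kNumTinyClasses + kNumQuantumClasses +
                       (FloorLog2(aSize / kMaxQuantumClass) - 1);
    }

    static_assert(BinIndex(4) == 0, "first tiny bin");
    static_assert(BinIndex(16) == kNumTinyClasses, "first quantum bin");
    static_assert(BinIndex(512) == kNumTinyClasses + kNumQuantumClasses - 1,
                  "last quantum bin");
    static_assert(BinIndex(1024) == kNumTinyClasses + kNumQuantumClasses,
                  "first sub-page bin");

    int main() {}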
@@ -3828,17 +3830,18 @@ arena_t::arena_t()
bin->mNumRuns = 0;
// SizeClass doesn't want sizes larger than gMaxSubPageClass for now.
if (sizeClass.Size() == gMaxSubPageClass) {
break;
}
sizeClass = sizeClass.Next();
}
- MOZ_ASSERT(i == ntbins + nqbins + nsbins - 1);
+ MOZ_ASSERT(i ==
+ kNumTinyClasses + kNumQuantumClasses + gNumSubPageClasses - 1);
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
mMagic = ARENA_MAGIC;
#endif
}
arena_t*
ArenaCollection::CreateArena(bool aIsPrivate)
@@ -4562,17 +4565,18 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
// "committed" counts dirty and allocated memory.
arena_committed = arena->mStats.committed << gPageSize2Pow;
arena_allocated =
arena->mStats.allocated_small + arena->mStats.allocated_large;
arena_dirty = arena->mNumDirty << gPageSize2Pow;
- for (j = 0; j < ntbins + nqbins + nsbins; j++) {
+ for (j = 0; j < kNumTinyClasses + kNumQuantumClasses + gNumSubPageClasses;
+ j++) {
arena_bin_t* bin = &arena->mBins[j];
size_t bin_unused = 0;
for (auto mapelm : bin->mNonFullRuns.iter()) {
run = (arena_run_t*)(mapelm->bits & ~gPageSizeMask);
bin_unused += run->nfree * bin->mSizeClass;
}