--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -54,18 +54,17 @@
// of the original request size is maintained. Allocations are broken into
// categories according to size class. Assuming runtime defaults, 4 kB pages
// and a 16 byte quantum on a 32-bit system, the size classes in each category
// are as follows:
//
// |=====================================|
// | Category | Subcategory | Size |
// |=====================================|
-// | Small | Tiny | 2 |
-// | | | 4 |
+// | Small | Tiny | 4 |
// | | | 8 |
// | |----------------+---------|
// | | Quantum-spaced | 16 |
// | | | 32 |
// | | | 48 |
// | | | ... |
// | | | 480 |
// | | | 496 |
@@ -367,57 +366,59 @@ struct arena_chunk_t
// Map of pages within chunk that keeps track of free/large/small.
arena_chunk_map_t map[1]; // Dynamically sized.
};
// ***************************************************************************
// Constants defining allocator size classes and behavior.
-// Minimum alignment of non-tiny allocations is 2^QUANTUM_2POW_MIN bytes.
-#define QUANTUM_2POW_MIN 4
-
// Size and alignment of memory chunks that are allocated by the OS's virtual
// memory system.
#define CHUNK_2POW_DEFAULT 20
// Maximum size of L1 cache line. This is used to avoid cache line aliasing,
// so over-estimates are okay (up to a point), but under-estimates will
// negatively affect performance.
static const size_t kCacheLineSize = 64;
// Smallest size class to support. On Windows the smallest allocation size
// must be 8 bytes on 32-bit, 16 bytes on 64-bit. On Linux and Mac, even
// malloc(1) must reserve a word's worth of memory (see Mozilla bug 691003).
#ifdef XP_WIN
-#define TINY_MIN_2POW (sizeof(void*) == 8 ? 4 : 3)
+static const size_t kMinTinyClass = sizeof(void*) * 2;
#else
-#define TINY_MIN_2POW (sizeof(void*) == 8 ? 3 : 2)
+static const size_t kMinTinyClass = sizeof(void*);
#endif
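+// With the above, kMinTinyClass is 8 (32-bit) or 16 (64-bit) bytes on
+// Windows, and 4 (32-bit) or 8 (64-bit) bytes elsewhere, the same sizes
+// 1 << TINY_MIN_2POW yielded before.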
-// Maximum size class that is a multiple of the quantum, but not (necessarily)
-// a power of 2. Above this size, allocations are rounded up to the nearest
-// power of 2.
-#define SMALL_MAX_2POW_DEFAULT 9
-#define SMALL_MAX_DEFAULT (1U << SMALL_MAX_2POW_DEFAULT)
-
-// Various quantum-related settings.
-#define QUANTUM_DEFAULT (size_t(1) << QUANTUM_2POW_MIN)
-static const size_t quantum = QUANTUM_DEFAULT;
-static const size_t quantum_mask = QUANTUM_DEFAULT - 1;
-
-// Various bin-related settings.
-static const size_t small_min = (QUANTUM_DEFAULT >> 1) + 1;
-static const size_t small_max = size_t(SMALL_MAX_DEFAULT);
+// Largest tiny size class.
+static const size_t kMaxTinyClass = 8;
+
+// Amount (quantum) separating quantum-spaced size classes.
+static const size_t kQuantum = 16;
+static const size_t kQuantumMask = kQuantum - 1;
+
+// Smallest quantum-spaced size class. It could also be considered a tiny
+// allocation, and is spaced from the largest tiny size class as one: tiny
+// classes are powers of 2, so it is twice the largest of them.
+static const size_t kMinQuantumClass = kMaxTinyClass * 2;
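+// e.g. with kMaxTinyClass == 8, this is 16, coinciding with kQuantum.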
+
+// Largest quantum-spaced size class.
+static const size_t kMaxQuantumClass = 512;
+
+static_assert(kMaxQuantumClass % kQuantum == 0,
+ "kMaxQuantumClass is not a multiple of kQuantum");
// Number of (2^n)-spaced tiny bins.
-static const unsigned ntbins = unsigned(QUANTUM_2POW_MIN - TINY_MIN_2POW);
+static const unsigned ntbins =
+ unsigned(LOG2(kMinQuantumClass) - LOG2(kMinTinyClass));
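+// e.g. on 32-bit Linux, LOG2(16) - LOG2(4) == 2 tiny bins (4 and 8 bytes).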
// Number of quantum-spaced bins.
-static const unsigned nqbins = unsigned(SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN);
+static const unsigned nqbins = unsigned(kMaxQuantumClass / kQuantum);
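+// e.g. 512 / 16 == 32 bins (16, 32, ..., 512).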
#define CHUNKSIZE_DEFAULT ((size_t)1 << CHUNK_2POW_DEFAULT)
static const size_t chunksize = CHUNKSIZE_DEFAULT;
static const size_t chunksize_mask = CHUNKSIZE_DEFAULT - 1;
#ifdef MALLOC_STATIC_PAGESIZE
// VM page size. It must divide the runtime CPU page size or the code
// will abort.
@@ -453,57 +454,60 @@ static size_t pagesize;
static void DefineGlobals() \
{
#define END_GLOBALS }
#define DEFINE_GLOBAL(type)
#define GLOBAL_LOG2 FloorLog2
#define GLOBAL_ASSERT MOZ_RELEASE_ASSERT
#endif
-DECLARE_GLOBAL(size_t, pagesize_mask)
+DECLARE_GLOBAL(size_t, gMaxSubPageClass)
+DECLARE_GLOBAL(uint8_t, nsbins)
DECLARE_GLOBAL(uint8_t, pagesize_2pow)
-DECLARE_GLOBAL(uint8_t, nsbins)
-DECLARE_GLOBAL(size_t, bin_maxclass)
+DECLARE_GLOBAL(size_t, pagesize_mask)
DECLARE_GLOBAL(size_t, chunk_npages)
DECLARE_GLOBAL(size_t, arena_chunk_header_npages)
-DECLARE_GLOBAL(size_t, arena_maxclass)
+DECLARE_GLOBAL(size_t, gMaxLargeClass)
DEFINE_GLOBALS
-DEFINE_GLOBAL(size_t) pagesize_mask = pagesize - 1;
-DEFINE_GLOBAL(uint8_t) pagesize_2pow = GLOBAL_LOG2(pagesize);
+// Largest sub-page size class.
+DEFINE_GLOBAL(size_t) gMaxSubPageClass = pagesize / 2;
+
+// Max size class for bins.
+#define gMaxBinClass gMaxSubPageClass
// Number of (2^n)-spaced sub-page bins.
DEFINE_GLOBAL(uint8_t)
-nsbins = pagesize_2pow - SMALL_MAX_2POW_DEFAULT - 1;
-
-// Max size class for bins.
-DEFINE_GLOBAL(size_t) bin_maxclass = pagesize >> 1;
+nsbins = GLOBAL_LOG2(gMaxSubPageClass) - LOG2(kMaxQuantumClass);
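+// e.g. with 4 kB pages, LOG2(2048) - LOG2(512) == 2 bins (1024 and 2048).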
+
+DEFINE_GLOBAL(uint8_t) pagesize_2pow = GLOBAL_LOG2(pagesize);
+DEFINE_GLOBAL(size_t) pagesize_mask = pagesize - 1;
// Number of pages in a chunk.
DEFINE_GLOBAL(size_t) chunk_npages = chunksize >> pagesize_2pow;
// Number of pages necessary for a chunk header.
DEFINE_GLOBAL(size_t)
arena_chunk_header_npages =
((sizeof(arena_chunk_t) + sizeof(arena_chunk_map_t) * (chunk_npages - 1) +
pagesize_mask) &
~pagesize_mask) >>
pagesize_2pow;
// Max size class for arenas.
DEFINE_GLOBAL(size_t)
-arena_maxclass = chunksize - (arena_chunk_header_npages << pagesize_2pow);
+gMaxLargeClass = chunksize - (arena_chunk_header_npages << pagesize_2pow);
// Various sanity checks that regard configuration.
GLOBAL_ASSERT(1ULL << pagesize_2pow == pagesize,
"Page size is not a power of two");
-GLOBAL_ASSERT(quantum >= sizeof(void*));
-GLOBAL_ASSERT(quantum <= pagesize);
+GLOBAL_ASSERT(kQuantum >= sizeof(void*));
+GLOBAL_ASSERT(kQuantum <= pagesize);
GLOBAL_ASSERT(chunksize >= pagesize);
-GLOBAL_ASSERT(quantum * 4 <= chunksize);
+GLOBAL_ASSERT(kQuantum * 4 <= chunksize);
END_GLOBALS
// Recycle at most 128 chunks. With 1 MiB chunks, this means we retain at most
// 6.25% of the process address space on a 32-bit OS for later use.
#define CHUNK_RECYCLE_LIMIT 128
static const size_t gRecycleLimit = CHUNK_RECYCLE_LIMIT * CHUNKSIZE_DEFAULT;
@@ -537,17 +541,17 @@ static size_t opt_dirty_max = DIRTY_MAX_
// Return the smallest chunk multiple that is >= s.
#define CHUNK_CEILING(s) (((s) + chunksize_mask) & ~chunksize_mask)
// Return the smallest cacheline multiple that is >= s.
#define CACHELINE_CEILING(s) \
(((s) + (kCacheLineSize - 1)) & ~(kCacheLineSize - 1))
// Return the smallest quantum multiple that is >= a.
-#define QUANTUM_CEILING(a) (((a) + quantum_mask) & ~quantum_mask)
+#define QUANTUM_CEILING(a) (((a) + kQuantumMask) & ~kQuantumMask)
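+// e.g. QUANTUM_CEILING(9) == 16 and QUANTUM_CEILING(16) == 16.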
// Return the smallest pagesize multiple that is >= s.
#define PAGE_CEILING(s) (((s) + pagesize_mask) & ~pagesize_mask)
// ***************************************************************************
// MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
#if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
#error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
@@ -707,23 +711,23 @@ public:
{
Tiny,
Quantum,
SubPage,
};
explicit inline SizeClass(size_t aSize)
{
- if (aSize < small_min) {
+ if (aSize <= kMaxTinyClass) {
mType = Tiny;
- mSize = std::max(RoundUpPow2(aSize), size_t(1U << TINY_MIN_2POW));
- } else if (aSize <= small_max) {
+ mSize = std::max(RoundUpPow2(aSize), kMinTinyClass);
+ } else if (aSize <= kMaxQuantumClass) {
mType = Quantum;
mSize = QUANTUM_CEILING(aSize);
- } else if (aSize <= bin_maxclass) {
+ } else if (aSize <= gMaxSubPageClass) {
mType = SubPage;
mSize = RoundUpPow2(aSize);
} else {
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid size");
}
}
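+ // For example, with the constants above: SizeClass(1).Size() ==
+ // kMinTinyClass, SizeClass(9).Size() == 16 (Quantum), and
+ // SizeClass(513).Size() == 1024 (SubPage).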
SizeClass& operator=(const SizeClass& aOther) = default;
@@ -2241,18 +2245,18 @@ static inline arena_t*
choose_arena(size_t size)
{
arena_t* ret = nullptr;
// We can only use TLS if this is a PIC library, since for the static
// library version, libc's malloc is used by TLS allocation, which
// introduces a bootstrapping issue.
- // Only use a thread local arena for small sizes.
- if (size <= small_max) {
+ // Only use a thread local arena for quantum and tiny sizes.
+ if (size <= kMaxQuantumClass) {
ret = thread_arena.get();
}
if (!ret) {
ret = thread_local_arena(false);
}
MOZ_DIAGNOSTIC_ASSERT(ret);
return ret;
@@ -2320,20 +2324,20 @@ arena_run_reg_dalloc(arena_run_t* run, a
{
// To divide by a number D that is not a power of two we multiply
// by (2^21 / D) and then right shift by 21 positions.
//
// X / D
//
// becomes
//
-// (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
+// (X * size_invs[(D / kQuantum) - 3]) >> SIZE_INV_SHIFT
#define SIZE_INV_SHIFT 21
-#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
+#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s * kQuantum)) + 1)
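+//
+// For example, for D == 48 (== 3 * kQuantum), SIZE_INV(3) is
+// (1 << 21) / 48 + 1 == 43691, and (96 * 43691) >> 21 == 2 == 96 / 48.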
// clang-format off
static const unsigned size_invs[] = {
SIZE_INV(3),
SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
@@ -2373,19 +2377,18 @@ arena_run_reg_dalloc(arena_run_t* run, a
regind = (diff >> log2_table[size - 1]);
} else if (size <= 32768) {
regind = diff >> (8 + log2_table[(size >> 8) - 1]);
} else {
// The run size is too large for us to use the lookup
// table. Use real division.
regind = diff / size;
}
- } else if (size <=
- ((sizeof(size_invs) / sizeof(unsigned)) << QUANTUM_2POW_MIN) + 2) {
- regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
+ } else if (size <= ((sizeof(size_invs) / sizeof(unsigned)) * kQuantum) + 2) {
+ regind = size_invs[(size / kQuantum) - 3] * diff;
regind >>= SIZE_INV_SHIFT;
} else {
- // size_invs isn't large enough to handle this size class, so
- // calculate regind using actual division. This only happens
- // if the user increases small_max via the 'S' runtime
- // configuration option.
+ // size_invs isn't large enough to handle this size class, so
+ // calculate regind using actual division. This can only happen
+ // if the quantum-spaced size classes change such that size_invs
+ // no longer covers them all.
regind = diff / size;
};
@@ -2540,26 +2543,26 @@ arena_t::InitChunk(arena_chunk_t* aChunk
arena_run_t* run =
(arena_run_t*)(uintptr_t(aChunk) +
(arena_chunk_header_npages << pagesize_2pow));
#endif
for (i = 0; i < arena_chunk_header_npages; i++) {
aChunk->map[i].bits = 0;
}
- aChunk->map[i].bits = arena_maxclass | flags;
+ aChunk->map[i].bits = gMaxLargeClass | flags;
for (i++; i < chunk_npages - 1; i++) {
aChunk->map[i].bits = flags;
}
- aChunk->map[chunk_npages - 1].bits = arena_maxclass | flags;
+ aChunk->map[chunk_npages - 1].bits = gMaxLargeClass | flags;
#ifdef MALLOC_DECOMMIT
// Start out decommitted, in order to force a closer correspondence
// between dirty pages and committed untouched pages.
- pages_decommit(run, arena_maxclass);
+ pages_decommit(run, gMaxLargeClass);
#endif
mStats.committed += arena_chunk_header_npages;
// Insert the run into the tree of available runs.
mRunsAvail.Insert(&aChunk->map[arena_chunk_header_npages]);
#ifdef MALLOC_DOUBLE_PURGE
new (&aChunk->chunks_madvised_elem) DoublyLinkedListElement<arena_chunk_t>();
@@ -2597,17 +2600,17 @@ arena_t::DeallocChunk(arena_chunk_t* aCh
arena_run_t*
arena_t::AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero)
{
arena_run_t* run;
arena_chunk_map_t* mapelm;
arena_chunk_map_t key;
- MOZ_ASSERT(aSize <= arena_maxclass);
+ MOZ_ASSERT(aSize <= gMaxLargeClass);
MOZ_ASSERT((aSize & pagesize_mask) == 0);
// Search the arena's chunks for the lowest best fit.
key.bits = aSize | CHUNK_MAP_KEY;
mapelm = mRunsAvail.SearchOrNext(&key);
if (mapelm) {
arena_chunk_t* chunk = GetChunkForPtr(mapelm);
size_t pageind =
@@ -2812,17 +2815,17 @@ arena_t::DallocRun(arena_run_t* aRun, bo
size | (chunk->map[run_ind + run_pages - 1].bits & pagesize_mask);
}
// Insert into tree of available runs, now that coalescing is complete.
mRunsAvail.Insert(&chunk->map[run_ind]);
// Deallocate chunk if it is now completely unused.
if ((chunk->map[arena_chunk_header_npages].bits &
- (~pagesize_mask | CHUNK_MAP_ALLOCATED)) == arena_maxclass) {
+ (~pagesize_mask | CHUNK_MAP_ALLOCATED)) == gMaxLargeClass) {
DeallocChunk(chunk);
}
// Enforce mMaxDirty.
if (mNumDirty > mMaxDirty) {
Purge(false);
}
}
@@ -2951,31 +2954,31 @@ arena_t::MallocBinHard(arena_bin_t* aBin
MOZ_DIAGNOSTIC_ASSERT(aBin->mCurrentRun->nfree > 0);
return MallocBinEasy(aBin, aBin->mCurrentRun);
}
// Calculate bin->mRunSize such that it meets the following constraints:
//
// *) bin->mRunSize >= min_run_size
-// *) bin->mRunSize <= arena_maxclass
-// *) bin->mRunSize <= RUN_MAX_SMALL
+// *) bin->mRunSize <= gMaxLargeClass
// *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
//
// bin->mRunNumRegions, bin->mRunNumRegionsMask, and bin->mRunFirstRegionOffset are
// also calculated here, since these settings are all interdependent.
static size_t
arena_bin_run_size_calc(arena_bin_t* bin, size_t min_run_size)
{
size_t try_run_size, good_run_size;
unsigned good_nregs, good_mask_nelms, good_reg0_offset;
unsigned try_nregs, try_mask_nelms, try_reg0_offset;
MOZ_ASSERT(min_run_size >= pagesize);
- MOZ_ASSERT(min_run_size <= arena_maxclass);
+ MOZ_ASSERT(min_run_size <= gMaxLargeClass);
// Calculate known-valid settings before entering the mRunSize
// expansion loop, so that the first part of the loop always copies
// valid settings.
//
// The do..while loop iteratively reduces the number of regions until
// the run header and the regions no longer overlap. A closed formula
// would be quite messy, since there is an interdependency between the
@@ -3007,17 +3010,17 @@ arena_bin_run_size_calc(arena_bin_t* bin
do {
try_nregs--;
try_mask_nelms =
(try_nregs >> (LOG2(sizeof(int)) + 3)) +
((try_nregs & ((1U << (LOG2(sizeof(int)) + 3)) - 1)) ? 1 : 0);
try_reg0_offset = try_run_size - (try_nregs * bin->mSizeClass);
} while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1)) >
try_reg0_offset);
- } while (try_run_size <= arena_maxclass &&
+ } while (try_run_size <= gMaxLargeClass &&
RUN_MAX_OVRHD * (bin->mSizeClass << 3) > RUN_MAX_OVRHD_RELAX &&
(try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
MOZ_ASSERT(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1)) <=
good_reg0_offset);
MOZ_ASSERT((good_mask_nelms << (LOG2(sizeof(int)) + 3)) >= good_nregs);
// Copy final settings.
@@ -3035,24 +3038,24 @@ arena_t::MallocSmall(size_t aSize, bool
void* ret;
arena_bin_t* bin;
arena_run_t* run;
SizeClass sizeClass(aSize);
aSize = sizeClass.Size();
switch (sizeClass.Type()) {
case SizeClass::Tiny:
- bin = &mBins[FloorLog2(aSize >> TINY_MIN_2POW)];
+ bin = &mBins[FloorLog2(aSize / kMinTinyClass)];
break;
case SizeClass::Quantum:
- bin = &mBins[ntbins + (aSize >> QUANTUM_2POW_MIN) - 1];
+ bin = &mBins[ntbins + (aSize / kQuantum) - 1];
break;
case SizeClass::SubPage:
- bin = &mBins[ntbins + nqbins +
- (FloorLog2(aSize >> SMALL_MAX_2POW_DEFAULT) - 1)];
+ bin =
+ &mBins[ntbins + nqbins + (FloorLog2(aSize / kMaxQuantumClass) - 1)];
break;
default:
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected size class type");
}
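+ // e.g. with 4 kB pages on 32-bit Linux, bins 0..1 are tiny (4, 8),
+ // bins 2..33 are quantum-spaced (16..512), and bins 34..35 are
+ // sub-page (1024, 2048).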
MOZ_DIAGNOSTIC_ASSERT(aSize == bin->mSizeClass);
{
MutexAutoLock lock(mLock);
@@ -3110,28 +3113,28 @@ arena_t::MallocLarge(size_t aSize, bool
return ret;
}
void*
arena_t::Malloc(size_t aSize, bool aZero)
{
MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
MOZ_ASSERT(aSize != 0);
- MOZ_ASSERT(QUANTUM_CEILING(aSize) <= arena_maxclass);
-
- return (aSize <= bin_maxclass) ? MallocSmall(aSize, aZero)
+ MOZ_ASSERT(QUANTUM_CEILING(aSize) <= gMaxLargeClass);
+
+ return (aSize <= gMaxBinClass) ? MallocSmall(aSize, aZero)
: MallocLarge(aSize, aZero);
}
static inline void*
imalloc(size_t aSize, bool aZero, arena_t* aArena)
{
MOZ_ASSERT(aSize != 0);
- if (aSize <= arena_maxclass) {
+ if (aSize <= gMaxLargeClass) {
aArena = aArena ? aArena : choose_arena(aSize);
return aArena->Malloc(aSize, aZero);
}
return huge_malloc(aSize, aZero);
}
// Only handles large allocations that require more than page alignment.
void*
@@ -3214,17 +3217,17 @@ ipalloc(size_t aAlignment, size_t aSize,
// (ceil_size < aSize) protects against the combination of maximal
// alignment and size greater than maximal alignment.
if (ceil_size < aSize) {
// size_t overflow.
return nullptr;
}
if (ceil_size <= pagesize ||
- (aAlignment <= pagesize && ceil_size <= arena_maxclass)) {
+ (aAlignment <= pagesize && ceil_size <= gMaxLargeClass)) {
aArena = aArena ? aArena : choose_arena(aSize);
ret = aArena->Malloc(ceil_size, false);
} else {
size_t run_size;
// We can't achieve sub-page alignment, so round up alignment
// permanently; it makes later calculations simpler.
aAlignment = PAGE_CEILING(aAlignment);
@@ -3255,17 +3258,17 @@ ipalloc(size_t aAlignment, size_t aSize,
// subtract pagesize, which in the case of overflow
// leaves us with a very large run_size. That causes
// the first conditional below to fail, which means
// that the bogus run_size value never gets used for
// anything important.
run_size = (aAlignment << 1) - pagesize;
}
- if (run_size <= arena_maxclass) {
+ if (run_size <= gMaxLargeClass) {
aArena = aArena ? aArena : choose_arena(aSize);
ret = aArena->Palloc(aAlignment, ceil_size, run_size);
} else if (aAlignment <= chunksize) {
ret = huge_malloc(ceil_size, false);
} else {
ret = huge_palloc(ceil_size, aAlignment, false);
}
}
@@ -3731,28 +3734,28 @@ arena_ralloc_large(void* aPtr, size_t aS
static void*
arena_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
{
void* ret;
size_t copysize;
// Try to avoid moving the allocation.
- if (aSize <= bin_maxclass) {
- if (aOldSize <= bin_maxclass && SizeClass(aSize) == SizeClass(aOldSize)) {
+ if (aSize <= gMaxBinClass) {
+ if (aOldSize <= gMaxBinClass && SizeClass(aSize) == SizeClass(aOldSize)) {
if (aSize < aOldSize) {
memset(
(void*)(uintptr_t(aPtr) + aSize), kAllocPoison, aOldSize - aSize);
} else if (opt_zero && aSize > aOldSize) {
memset((void*)(uintptr_t(aPtr) + aOldSize), 0, aSize - aOldSize);
}
return aPtr;
}
- } else if (aOldSize > bin_maxclass && aOldSize <= arena_maxclass) {
- MOZ_ASSERT(aSize > bin_maxclass);
+ } else if (aOldSize > gMaxBinClass && aOldSize <= gMaxLargeClass) {
+ MOZ_ASSERT(aSize > gMaxBinClass);
if (arena_ralloc_large(aPtr, aSize, aOldSize)) {
return aPtr;
}
}
// If we get here, then aSize and aOldSize are different enough that we
// need to move the object. In that case, fall back to allocating new
// space and copying.
@@ -3781,17 +3784,17 @@ iralloc(void* aPtr, size_t aSize, arena_
{
size_t oldsize;
MOZ_ASSERT(aPtr);
MOZ_ASSERT(aSize != 0);
oldsize = isalloc(aPtr);
- return (aSize <= arena_maxclass) ? arena_ralloc(aPtr, aSize, oldsize, aArena)
+ return (aSize <= gMaxLargeClass) ? arena_ralloc(aPtr, aSize, oldsize, aArena)
: huge_ralloc(aPtr, aSize, oldsize);
}
arena_t::arena_t()
{
unsigned i;
arena_bin_t* bin;
size_t prev_run_size;
@@ -3825,18 +3828,18 @@ arena_t::arena_t()
bin->mNonFullRuns.Init();
bin->mSizeClass = sizeClass.Size();
prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
bin->mNumRuns = 0;
- // SizeClass doesn't want sizes larger than bin_maxclass for now.
- if (sizeClass.Size() == bin_maxclass) {
+ // SizeClass doesn't want sizes larger than gMaxSubPageClass for now.
+ if (sizeClass.Size() == gMaxSubPageClass) {
break;
}
sizeClass = sizeClass.Next();
}
MOZ_ASSERT(i == ntbins + nqbins + nsbins - 1);
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
mMagic = ARENA_MAGIC;
@@ -3967,17 +3970,17 @@ huge_palloc(size_t aSize, size_t aAlignm
static void*
huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize)
{
void* ret;
size_t copysize;
// Avoid moving the allocation if the size class would not change.
- if (aOldSize > arena_maxclass &&
+ if (aOldSize > gMaxLargeClass &&
CHUNK_CEILING(aSize) == CHUNK_CEILING(aOldSize)) {
size_t psize = PAGE_CEILING(aSize);
if (aSize < aOldSize) {
memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
}
#ifdef MALLOC_DECOMMIT
if (psize < aOldSize) {
extent_node_t key;
@@ -4468,20 +4471,20 @@ MozJemalloc::valloc(size_t aSize)
// ***************************************************************************
// Begin non-standard functions.
// This was added by Mozilla for use by SQLite.
template<>
inline size_t
MozJemalloc::malloc_good_size(size_t aSize)
{
- if (aSize <= bin_maxclass) {
+ if (aSize <= gMaxSubPageClass) {
// Small
aSize = SizeClass(aSize).Size();
- } else if (aSize <= arena_maxclass) {
+ } else if (aSize <= gMaxLargeClass) {
// Large.
aSize = PAGE_CEILING(aSize);
} else {
// Huge. We use PAGE_CEILING to get psize, instead of using
// CHUNK_CEILING to get csize. This ensures that this
// malloc_usable_size(malloc(n)) always matches
// malloc_good_size(n).
aSize = PAGE_CEILING(aSize);
@@ -4508,19 +4511,19 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
if (!malloc_initialized) {
memset(aStats, 0, sizeof(*aStats));
return;
}
// Gather runtime settings.
aStats->opt_junk = opt_junk;
aStats->opt_zero = opt_zero;
- aStats->quantum = quantum;
- aStats->small_max = small_max;
- aStats->large_max = arena_maxclass;
+ aStats->quantum = kQuantum;
+ aStats->small_max = kMaxQuantumClass;
+ aStats->large_max = gMaxLargeClass;
aStats->chunksize = chunksize;
aStats->page_size = pagesize;
aStats->dirty_max = opt_dirty_max;
// Gather current memory usage statistics.
aStats->narenas = 0;
aStats->mapped = 0;
aStats->allocated = 0;