--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -605,17 +605,17 @@ struct arena_stats_t
// ***************************************************************************
// Extent data structures.
enum ChunkType
{
UNKNOWN_CHUNK,
ZEROED_CHUNK, // chunk only contains zeroes.
ARENA_CHUNK, // used to back arena runs created by arena_t::AllocRun.
- HUGE_CHUNK, // used to back huge allocations (e.g. huge_malloc).
+ HUGE_CHUNK, // used to back huge allocations (e.g. arena_t::MallocHuge).
RECYCLED_CHUNK, // chunk has been stored for future use by chunk_recycle.
};
// Tree of extents.
struct extent_node_t
{
// Linkage for the size/address-ordered tree.
RedBlackTreeNode<extent_node_t> mLinkBySize;
@@ -1029,17 +1029,21 @@ private:
arena_run_t* GetNonFullBinRun(arena_bin_t* aBin);
inline void* MallocSmall(size_t aSize, bool aZero);
void* MallocLarge(size_t aSize, bool aZero);
void* PallocLarge(size_t aAlignment, size_t aSize, size_t aAllocSize);
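+ // Allocates a huge block of at least aSize bytes, aligned to aAlignment
+ // (at least the chunk size).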
+ void* PallocHuge(size_t aSize, size_t aAlignment, bool aZero);
+
public:
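+ // Allocates a huge, chunk-aligned block of at least aSize bytes.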
+ void* MallocHuge(size_t aSize, bool aZero);
+
inline void* Malloc(size_t aSize, bool aZero);
void* Palloc(size_t aAlignment, size_t aSize);
inline void DallocSmall(arena_chunk_t* aChunk,
void* aPtr,
arena_chunk_map_t* aMapElm);
@@ -1246,20 +1250,16 @@ chunk_alloc(size_t aSize,
size_t aAlignment,
bool aBase,
bool* aZeroed = nullptr);
static void
chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType);
static void
chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed);
static void*
-huge_malloc(size_t size, bool zero, arena_t* aArena);
-static void*
-huge_palloc(size_t aSize, size_t aAlignment, bool aZero, arena_t* aArena);
-static void*
huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena);
static void
huge_dalloc(void* aPtr, arena_t* aArena);
static bool
malloc_init_hard();
#ifdef XP_DARWIN
#define FORK_HOOK extern "C"
@@ -3082,17 +3082,17 @@ static inline void*
imalloc(size_t aSize, bool aZero, arena_t* aArena)
{
MOZ_ASSERT(aSize != 0);
MOZ_ASSERT(aArena);
if (aSize <= gMaxLargeClass) {
return aArena->Malloc(aSize, aZero);
}
- return huge_malloc(aSize, aZero, aArena);
+ return aArena->MallocHuge(aSize, aZero);
}
// Only handles large allocations that require more than page alignment.
void*
arena_t::PallocLarge(size_t aAlignment, size_t aSize, size_t aAllocSize)
{
void* ret;
size_t offset;
@@ -3214,19 +3214,19 @@ arena_t::Palloc(size_t aAlignment, size_
// that the bogus run_size value never gets used for
// anything important.
run_size = (aAlignment << 1) - gPageSize;
}
if (run_size <= gMaxLargeClass) {
ret = PallocLarge(aAlignment, ceil_size, run_size);
} else if (aAlignment <= kChunkSize) {
- ret = huge_malloc(ceil_size, false, this);
+ ret = MallocHuge(ceil_size, false);
} else {
- ret = huge_palloc(ceil_size, aAlignment, false, this);
+ ret = PallocHuge(ceil_size, aAlignment, false);
}
}
MOZ_ASSERT((uintptr_t(ret) & (aAlignment - 1)) == 0);
return ret;
}
// Return the size of the allocation pointed to by ptr.
@@ -3835,24 +3835,24 @@ ArenaCollection::CreateArena(bool aIsPri
(aIsPrivate ? mPrivateArenas : mArenas).Insert(ret);
return ret;
}
// End arena.
// ***************************************************************************
// Begin general internal functions.
-static void*
-huge_malloc(size_t size, bool zero, arena_t* aArena)
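+// Huge allocations are always chunk-aligned, so this simply delegates to
+// PallocHuge with the chunk size as the alignment.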
+void*
+arena_t::MallocHuge(size_t aSize, bool aZero)
{
- return huge_palloc(size, kChunkSize, zero, aArena);
+ return PallocHuge(aSize, kChunkSize, aZero);
}
-static void*
-huge_palloc(size_t aSize, size_t aAlignment, bool aZero, arena_t* aArena)
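+// Allocates one or more contiguous chunks for the request and records the
+// allocation, along with the owning arena, in the global huge tree.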
+void*
+arena_t::PallocHuge(size_t aSize, size_t aAlignment, bool aZero)
{
void* ret;
size_t csize;
size_t psize;
extent_node_t* node;
bool zeroed;
// Allocate one or more contiguous chunks for this request.
@@ -3876,18 +3876,17 @@ huge_palloc(size_t aSize, size_t aAlignm
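+ // The chunk may have been recycled without being zeroed; zero it now if
+ // the caller requested zeroed memory.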
if (aZero) {
chunk_ensure_zero(ret, csize, zeroed);
}
// Insert node into huge.
node->mAddr = ret;
psize = PAGE_CEILING(aSize);
node->mSize = psize;
- MOZ_ASSERT(aArena);
- node->mArena = aArena;
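+ // Record this arena as the owner of the allocation.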
+ node->mArena = this;
{
MutexAutoLock lock(huge_mtx);
huge.Insert(node);
// Although we allocated space for csize bytes, we indicate that we've
// allocated only psize bytes.
//
@@ -3996,17 +3995,17 @@ huge_ralloc(void* aPtr, size_t aSize, si
memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
}
return aPtr;
}
// If we get here, then aSize and aOldSize are different enough that we
// need to use a different size class. In that case, fall back to
// allocating new space and copying.
- ret = huge_malloc(aSize, false, aArena);
+ ret = aArena->MallocHuge(aSize, false);
if (!ret) {
return nullptr;
}
copysize = (aSize < aOldSize) ? aSize : aOldSize;
#ifdef VM_COPY_MIN
if (copysize >= VM_COPY_MIN) {
pages_copy(ret, aPtr, copysize);