--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -1031,42 +1031,48 @@ private:
bool dirty);
arena_run_t* GetNonFullBinRun(arena_bin_t* aBin);
inline void* MallocSmall(size_t aSize, bool aZero);
void* MallocLarge(size_t aSize, bool aZero);
+ void* MallocHuge(size_t aSize, bool aZero);
+
void* PallocLarge(size_t aAlignment, size_t aSize, size_t aAllocSize);
void* PallocHuge(size_t aSize, size_t aAlignment, bool aZero);
+ void RallocShrinkLarge(arena_chunk_t* aChunk,
+ void* aPtr,
+ size_t aSize,
+ size_t aOldSize);
+
+ bool RallocGrowLarge(arena_chunk_t* aChunk,
+ void* aPtr,
+ size_t aSize,
+ size_t aOldSize);
+
+ void* RallocSmallOrLarge(void* aPtr, size_t aSize, size_t aOldSize);
+
+ void* RallocHuge(void* aPtr, size_t aSize, size_t aOldSize);
public:
- void* MallocHuge(size_t aSize, bool aZero);
inline void* Malloc(size_t aSize, bool aZero);
void* Palloc(size_t aAlignment, size_t aSize);
inline void DallocSmall(arena_chunk_t* aChunk,
void* aPtr,
arena_chunk_map_t* aMapElm);
void DallocLarge(arena_chunk_t* aChunk, void* aPtr);
- void RallocShrinkLarge(arena_chunk_t* aChunk,
- void* aPtr,
- size_t aSize,
- size_t aOldSize);
-
- bool RallocGrowLarge(arena_chunk_t* aChunk,
- void* aPtr,
- size_t aSize,
- size_t aOldSize);
+ void* Ralloc(void* aPtr, size_t aSize, size_t aOldSize);
void Purge(bool aAll);
void HardPurge();
void* operator new(size_t aCount) = delete;
void* operator new(size_t aCount, const fallible_t&)
@@ -1253,18 +1259,16 @@ static void*
chunk_alloc(size_t aSize,
size_t aAlignment,
bool aBase,
bool* aZeroed = nullptr);
static void
chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType);
static void
chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed);
-static void*
-huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena);
static void
huge_dalloc(void* aPtr, arena_t* aArena);
static bool
malloc_init_hard();

#ifdef XP_DARWIN
#define FORK_HOOK extern "C"
#else
@@ -3660,18 +3664,18 @@ arena_t::RallocGrowLarge(arena_chunk_t*
mStats.allocated_large += aSize - aOldSize;
return true;
}
return false;
}

-static void*
-arena_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
+void*
+arena_t::RallocSmallOrLarge(void* aPtr, size_t aSize, size_t aOldSize)
{
void* ret;
size_t copysize;
SizeClass sizeClass(aSize);
// Try to avoid moving the allocation.
if (aOldSize <= gMaxLargeClass && sizeClass.Size() == aOldSize) {
if (aSize < aOldSize) {
@@ -3681,64 +3685,58 @@ arena_ralloc(void* aPtr, size_t aSize, s
return aPtr;
}
if (sizeClass.Type() == SizeClass::Large && aOldSize > gMaxBinClass &&
aOldSize <= gMaxLargeClass) {
arena_chunk_t* chunk = GetChunkForPtr(aPtr);
if (sizeClass.Size() < aOldSize) {
// Fill before shrinking in order to avoid a race.
memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
- aArena->RallocShrinkLarge(chunk, aPtr, sizeClass.Size(), aOldSize);
+ RallocShrinkLarge(chunk, aPtr, sizeClass.Size(), aOldSize);
return aPtr;
}
- if (aArena->RallocGrowLarge(chunk, aPtr, sizeClass.Size(), aOldSize)) {
+ if (RallocGrowLarge(chunk, aPtr, sizeClass.Size(), aOldSize)) {
if (opt_zero) {
memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
}
return aPtr;
}
}

// If we get here, then aSize and aOldSize are different enough that we
// need to move the object. In that case, fall back to allocating new
// space and copying.
- ret = aArena->Malloc(aSize, false);
+ ret = Malloc(aSize, false);
if (!ret) {
return nullptr;
}
// Junk/zero-filling were already done by arena_t::Malloc().
copysize = (aSize < aOldSize) ? aSize : aOldSize;
#ifdef VM_COPY_MIN
if (copysize >= VM_COPY_MIN) {
pages_copy(ret, aPtr, copysize);
} else
#endif
{
memcpy(ret, aPtr, copysize);
}
- idalloc(aPtr, aArena);
+ idalloc(aPtr, this);
return ret;
}

-static inline void*
-iralloc(void* aPtr, size_t aSize, arena_t* aArena)
+void*
+arena_t::Ralloc(void* aPtr, size_t aSize, size_t aOldSize)
{
+ MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
MOZ_ASSERT(aPtr);
MOZ_ASSERT(aSize != 0);
- auto info = AllocInfo::Get(aPtr);
- auto arena = info.Arena();
- MOZ_RELEASE_ASSERT(!aArena || arena == aArena);
- aArena = aArena ? aArena : arena;
- size_t oldsize = info.Size();
- MOZ_DIAGNOSTIC_ASSERT(aArena->mMagic == ARENA_MAGIC);
-
- return (aSize <= gMaxLargeClass) ? arena_ralloc(aPtr, aSize, oldsize, aArena)
- : huge_ralloc(aPtr, aSize, oldsize, aArena);
+ return (aSize <= gMaxLargeClass) ? RallocSmallOrLarge(aPtr, aSize, aOldSize)
+ : RallocHuge(aPtr, aSize, aOldSize);
}

arena_t::arena_t(arena_params_t* aParams)
{
unsigned i;
MOZ_RELEASE_ASSERT(mLock.Init());
@@ -3900,18 +3898,18 @@ arena_t::PallocHuge(size_t aSize, size_t
memset(ret, 0, csize);
#endif
}
}

return ret;
}

-static void*
-huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
+void*
+arena_t::RallocHuge(void* aPtr, size_t aSize, size_t aOldSize)
{
void* ret;
size_t copysize;
// Avoid moving the allocation if the size class would not change.
if (aOldSize > gMaxLargeClass &&
CHUNK_CEILING(aSize) == CHUNK_CEILING(aOldSize)) {
size_t psize = PAGE_CEILING(aSize);
@@ -3925,17 +3923,17 @@ huge_ralloc(void* aPtr, size_t aSize, si
pages_decommit((void*)((uintptr_t)aPtr + psize), aOldSize - psize);
// Update recorded size.
MutexAutoLock lock(huge_mtx);
key.mAddr = const_cast<void*>(aPtr);
extent_node_t* node = huge.Search(&key);
MOZ_ASSERT(node);
MOZ_ASSERT(node->mSize == aOldSize);
- MOZ_RELEASE_ASSERT(node->mArena == aArena);
+ MOZ_RELEASE_ASSERT(node->mArena == this);
huge_allocated -= aOldSize - psize;
// No need to change huge_mapped, because we didn't (un)map anything.
node->mSize = psize;
} else if (psize > aOldSize) {
if (!pages_commit((void*)((uintptr_t)aPtr + aOldSize),
psize - aOldSize)) {
return nullptr;
}
@@ -3950,47 +3948,47 @@ huge_ralloc(void* aPtr, size_t aSize, si
if (psize > aOldSize) {
// Update recorded size.
extent_node_t key;
MutexAutoLock lock(huge_mtx);
key.mAddr = const_cast<void*>(aPtr);
extent_node_t* node = huge.Search(&key);
MOZ_ASSERT(node);
MOZ_ASSERT(node->mSize == aOldSize);
- MOZ_RELEASE_ASSERT(node->mArena == aArena);
+ MOZ_RELEASE_ASSERT(node->mArena == this);
huge_allocated += psize - aOldSize;
// No need to change huge_mapped, because we didn't
// (un)map anything.
node->mSize = psize;
}
if (opt_zero && aSize > aOldSize) {
memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
}
return aPtr;
}

// If we get here, then aSize and aOldSize are different enough that we
// need to use a different size class. In that case, fall back to
// allocating new space and copying.
- ret = aArena->MallocHuge(aSize, false);
+ ret = MallocHuge(aSize, false);
if (!ret) {
return nullptr;
}
copysize = (aSize < aOldSize) ? aSize : aOldSize;
#ifdef VM_COPY_MIN
if (copysize >= VM_COPY_MIN) {
pages_copy(ret, aPtr, copysize);
} else
#endif
{
memcpy(ret, aPtr, copysize);
}
- idalloc(aPtr, aArena);
+ idalloc(aPtr, this);
return ret;
}

static void
huge_dalloc(void* aPtr, arena_t* aArena)
{
extent_node_t* node;
{
@@ -4300,17 +4298,20 @@ BaseAllocator::realloc(void* aPtr, size_
if (aSize == 0) {
aSize = 1;
}

if (aPtr) {
MOZ_RELEASE_ASSERT(malloc_initialized);
- ret = iralloc(aPtr, aSize, mArena);
+ auto info = AllocInfo::Get(aPtr);
+ auto arena = info.Arena();
+ MOZ_RELEASE_ASSERT(!mArena || arena == mArena);
+ ret = arena->Ralloc(aPtr, aSize, info.Size());
if (!ret) {
errno = ENOMEM;
}
} else {
if (!malloc_init()) {
ret = nullptr;
} else {