--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -624,18 +624,23 @@ struct extent_node_t
RedBlackTreeNode<extent_node_t> mLinkByAddr;
// Pointer to the extent that this tree node is responsible for.
void* mAddr;
// Total region size.
size_t mSize;
- // What type of chunk is there; used by chunk recycling code.
- ChunkType mChunkType;
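+ // The two members below are only used in mutually exclusive contexts, so
+ // they can share storage: recycled chunks record their type, while nodes
+ // tracking huge allocations record the owning arena.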
+ union {
+ // What type of chunk is there; used for chunk recycling.
+ ChunkType mChunkType;
+
+ // A pointer to the associated arena, for huge allocations.
+ arena_t* mArena;
+ };
};
struct ExtentTreeSzTrait
{
static RedBlackTreeNode<extent_node_t>& GetTreeNode(extent_node_t* aThis)
{
return aThis->mLinkBySize;
}
@@ -1241,21 +1246,21 @@ chunk_alloc(size_t aSize,
size_t aAlignment,
bool aBase,
bool* aZeroed = nullptr);
static void
chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType);
static void
chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed);
static void*
-huge_malloc(size_t size, bool zero);
+huge_malloc(size_t size, bool zero, arena_t* aArena);
static void*
-huge_palloc(size_t aSize, size_t aAlignment, bool aZero);
+huge_palloc(size_t aSize, size_t aAlignment, bool aZero, arena_t* aArena);
static void*
-huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize);
+huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena);
static void
huge_dalloc(void* aPtr);
#ifdef XP_WIN
extern "C"
#else
static
#endif
bool
@@ -3083,21 +3088,21 @@ arena_t::Malloc(size_t aSize, bool aZero
: MallocLarge(aSize, aZero);
}
static inline void*
imalloc(size_t aSize, bool aZero, arena_t* aArena)
{
MOZ_ASSERT(aSize != 0);
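+ // Pick an arena up front so that the huge path below can also be tied to
+ // one, not just the small/large path.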
+ aArena = aArena ? aArena : choose_arena(aSize);
if (aSize <= gMaxLargeClass) {
- aArena = aArena ? aArena : choose_arena(aSize);
return aArena->Malloc(aSize, aZero);
}
- return huge_malloc(aSize, aZero);
+ return huge_malloc(aSize, aZero, aArena);
}
// Only handles large allocations that require more than page alignment.
void*
arena_t::Palloc(size_t aAlignment, size_t aSize, size_t aAllocSize)
{
void* ret;
size_t offset;
@@ -3175,19 +3180,19 @@ ipalloc(size_t aAlignment, size_t aSize,
// (ceil_size < aSize) protects against the combination of maximal
// alignment and size greater than maximal alignment.
if (ceil_size < aSize) {
// size_t overflow.
return nullptr;
}
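+ // As in imalloc(), resolve the arena before branching so that the huge
+ // allocation paths below receive it too.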
+ aArena = aArena ? aArena : choose_arena(aSize);
if (ceil_size <= gPageSize ||
(aAlignment <= gPageSize && ceil_size <= gMaxLargeClass)) {
- aArena = aArena ? aArena : choose_arena(aSize);
ret = aArena->Malloc(ceil_size, false);
} else {
size_t run_size;
// We can't achieve sub-page alignment, so round up alignment
// permanently; it makes later calculations simpler.
aAlignment = PAGE_CEILING(aAlignment);
ceil_size = PAGE_CEILING(aSize);
@@ -3218,22 +3223,21 @@ ipalloc(size_t aAlignment, size_t aSize,
// leaves us with a very large run_size. That causes
// the first conditional below to fail, which means
// that the bogus run_size value never gets used for
// anything important.
run_size = (aAlignment << 1) - gPageSize;
}
if (run_size <= gMaxLargeClass) {
- aArena = aArena ? aArena : choose_arena(aSize);
ret = aArena->Palloc(aAlignment, ceil_size, run_size);
} else if (aAlignment <= kChunkSize) {
- ret = huge_malloc(ceil_size, false);
+ ret = huge_malloc(ceil_size, false, aArena);
} else {
- ret = huge_palloc(ceil_size, aAlignment, false);
+ ret = huge_palloc(ceil_size, aAlignment, false, aArena);
}
}
MOZ_ASSERT((uintptr_t(ret) & (aAlignment - 1)) == 0);
return ret;
}
// Return the size of the allocation pointed to by ptr.
@@ -3266,63 +3270,91 @@ arena_salloc(const void* ptr)
class AllocInfo
{
public:
template<bool Validate = false>
static inline AllocInfo Get(const void* aPtr)
{
// If the allocator is not initialized, the pointer can't belong to it.
if (Validate && malloc_initialized == false) {
- return AllocInfo(0);
+ return AllocInfo();
}
auto chunk = GetChunkForPtr(aPtr);
if (Validate) {
if (!chunk || !gChunkRTree.Get(chunk)) {
- return AllocInfo(0);
+ return AllocInfo();
}
}
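+ // Small and large allocations: the chunk header knows the owning arena,
+ // so keep the chunk alongside the size.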
if (chunk != aPtr) {
MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
- return AllocInfo(arena_salloc(aPtr));
+ return AllocInfo(arena_salloc(aPtr), chunk);
}
extent_node_t key;
// Huge allocation
key.mAddr = chunk;
MutexAutoLock lock(huge_mtx);
extent_node_t* node = huge.Search(&key);
if (Validate && !node) {
- return AllocInfo(0);
+ return AllocInfo();
}
- return AllocInfo(node->mSize);
+ return AllocInfo(node->mSize, node);
}
// Validate ptr before assuming that it points to an allocation. Currently,
// the following validation is performed:
//
// + Check that ptr is not nullptr.
//
// + Check that ptr lies within a mapped chunk.
static inline AllocInfo GetValidated(const void* aPtr)
{
return Get<true>(aPtr);
}
- explicit AllocInfo(size_t aSize)
+ AllocInfo()
+ : mSize(0)
+ , mChunk(nullptr)
+ {
+ }
+
+ explicit AllocInfo(size_t aSize, arena_chunk_t* aChunk)
: mSize(aSize)
+ , mChunk(aChunk)
{
+ MOZ_ASSERT(mSize <= gMaxLargeClass);
+ }
+
+ explicit AllocInfo(size_t aSize, extent_node_t* aNode)
+ : mSize(aSize)
+ , mNode(aNode)
+ {
+ MOZ_ASSERT(mSize > gMaxLargeClass);
}
size_t Size() { return mSize; }
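+ // The size doubles as the discriminant for the union below: anything up
+ // to gMaxLargeClass lives in an arena chunk, anything larger is a huge
+ // allocation tracked by an extent node.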
+ arena_t* Arena()
+ {
+ return (mSize <= gMaxLargeClass) ? mChunk->arena : mNode->mArena;
+ }
+
private:
size_t mSize;
+ union {
+ // Pointer to the chunk associated with the allocation for small
+ // and large allocations.
+ arena_chunk_t* mChunk;
+
+ // Pointer to the extent node for huge allocations.
+ extent_node_t* mNode;
+ };
};
template<>
inline void
MozJemalloc::jemalloc_ptr_info(const void* aPtr, jemalloc_ptr_info_t* aInfo)
{
arena_chunk_t* chunk = GetChunkForPtr(aPtr);
@@ -3646,41 +3678,39 @@ arena_t::RallocGrowLarge(arena_chunk_t*
return false;
}
// Try to resize a large allocation, in order to avoid copying. This will
// always fail when growing an object if the following run is already in use.
// Returns whether reallocation was successful.
static bool
-arena_ralloc_large(void* aPtr, size_t aSize, size_t aOldSize)
+arena_ralloc_large(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
{
size_t psize;
psize = PAGE_CEILING(aSize);
if (psize == aOldSize) {
// Same size class.
if (aSize < aOldSize) {
memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
}
return true;
}
arena_chunk_t* chunk = GetChunkForPtr(aPtr);
- arena_t* arena = chunk->arena;
- MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
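+ // The chunk is still needed to locate the run, but the owning arena now
+ // comes from the caller, which has already validated it.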
if (psize < aOldSize) {
// Fill before shrinking in order to avoid a race.
memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
- arena->RallocShrinkLarge(chunk, aPtr, psize, aOldSize);
+ aArena->RallocShrinkLarge(chunk, aPtr, psize, aOldSize);
return true;
}
- bool ret = arena->RallocGrowLarge(chunk, aPtr, psize, aOldSize);
+ bool ret = aArena->RallocGrowLarge(chunk, aPtr, psize, aOldSize);
if (ret && opt_zero) {
memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
}
return ret;
}
static void*
arena_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
@@ -3696,29 +3726,24 @@ arena_ralloc(void* aPtr, size_t aSize, s
(void*)(uintptr_t(aPtr) + aSize), kAllocPoison, aOldSize - aSize);
} else if (opt_zero && aSize > aOldSize) {
memset((void*)(uintptr_t(aPtr) + aOldSize), 0, aSize - aOldSize);
}
return aPtr;
}
} else if (aOldSize > gMaxBinClass && aOldSize <= gMaxLargeClass) {
MOZ_ASSERT(aSize > gMaxBinClass);
- if (arena_ralloc_large(aPtr, aSize, aOldSize)) {
+ if (arena_ralloc_large(aPtr, aSize, aOldSize, aArena)) {
return aPtr;
}
}
// If we get here, then aSize and aOldSize are different enough that we
// need to move the object. In that case, fall back to allocating new
// space and copying.
- if (!aArena) {
- arena_chunk_t* chunk = GetChunkForPtr(aPtr);
- aArena = chunk->arena;
- MOZ_DIAGNOSTIC_ASSERT(aArena->mMagic == ARENA_MAGIC);
- }
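+ // aArena is now always supplied by iralloc(), so there is no need to look
+ // it up from the chunk header here.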
ret = aArena->Malloc(aSize, false);
if (!ret) {
return nullptr;
}
// Junk/zero-filling were already done by arena_t::Malloc().
copysize = (aSize < aOldSize) ? aSize : aOldSize;
#ifdef VM_COPY_MIN
@@ -3731,25 +3756,27 @@ arena_ralloc(void* aPtr, size_t aSize, s
}
idalloc(aPtr);
return ret;
}
static inline void*
iralloc(void* aPtr, size_t aSize, arena_t* aArena)
{
- size_t oldsize;
-
MOZ_ASSERT(aPtr);
MOZ_ASSERT(aSize != 0);
- oldsize = AllocInfo::Get(aPtr).Size();
+ auto info = AllocInfo::Get(aPtr);
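+ // If the caller didn't specify an arena, fall back to the one the
+ // allocation originally came from (via the chunk header or the huge
+ // extent node).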
+ aArena = aArena ? aArena : info.Arena();
+ size_t oldsize = info.Size();
+ MOZ_RELEASE_ASSERT(aArena);
+ MOZ_DIAGNOSTIC_ASSERT(aArena->mMagic == ARENA_MAGIC);
return (aSize <= gMaxLargeClass) ? arena_ralloc(aPtr, aSize, oldsize, aArena)
- : huge_ralloc(aPtr, aSize, oldsize);
+ : huge_ralloc(aPtr, aSize, oldsize, aArena);
}
arena_t::arena_t()
{
unsigned i;
MOZ_RELEASE_ASSERT(mLock.Init());
@@ -3816,23 +3843,23 @@ ArenaCollection::CreateArena(bool aIsPri
return ret;
}
// End arena.
// ***************************************************************************
// Begin general internal functions.
static void*
-huge_malloc(size_t size, bool zero)
+huge_malloc(size_t size, bool zero, arena_t* aArena)
{
- return huge_palloc(size, kChunkSize, zero);
+ return huge_palloc(size, kChunkSize, zero, aArena);
}
static void*
-huge_palloc(size_t aSize, size_t aAlignment, bool aZero)
+huge_palloc(size_t aSize, size_t aAlignment, bool aZero, arena_t* aArena)
{
void* ret;
size_t csize;
size_t psize;
extent_node_t* node;
bool zeroed;
// Allocate one or more contiguous chunks for this request.
@@ -3856,16 +3883,17 @@ huge_palloc(size_t aSize, size_t aAlignm
if (aZero) {
chunk_ensure_zero(ret, csize, zeroed);
}
// Insert node into huge.
node->mAddr = ret;
psize = PAGE_CEILING(aSize);
node->mSize = psize;
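+ // Record the arena this huge allocation is associated with, choosing one
+ // when the caller didn't pass any.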
+ node->mArena = aArena ? aArena : choose_arena(aSize);
{
MutexAutoLock lock(huge_mtx);
huge.Insert(node);
// Although we allocated space for csize bytes, we indicate that we've
// allocated only psize bytes.
//
@@ -3909,17 +3937,17 @@ huge_palloc(size_t aSize, size_t aAlignm
#endif
}
}
return ret;
}
static void*
-huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize)
+huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
{
void* ret;
size_t copysize;
// Avoid moving the allocation if the size class would not change.
if (aOldSize > gMaxLargeClass &&
CHUNK_CEILING(aSize) == CHUNK_CEILING(aOldSize)) {
size_t psize = PAGE_CEILING(aSize);
@@ -3972,17 +4000,17 @@ huge_ralloc(void* aPtr, size_t aSize, si
memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
}
return aPtr;
}
// If we get here, then aSize and aOldSize are different enough that we
// need to use a different size class. In that case, fall back to
// allocating new space and copying.
- ret = huge_malloc(aSize, false);
+ ret = huge_malloc(aSize, false, aArena);
if (!ret) {
return nullptr;
}
copysize = (aSize < aOldSize) ? aSize : aOldSize;
#ifdef VM_COPY_MIN
if (copysize >= VM_COPY_MIN) {
pages_copy(ret, aPtr, copysize);