Bug 1406303 - Don't heap-allocate the global chunk radix tree. r?njn
Now that the radix tree structure has a fixed size, the global chunk radix
tree can be allocated statically instead of on the heap. The fallible
Create() factory becomes an Init() method that initializes the lock and
allocates the first-level node array.
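For reviewers, a minimal standalone sketch of the before/after pattern (the
Tree type, the calloc-based allocation and the 256-slot root are illustrative
stand-ins, not mozjemalloc code):

  #include <cstdlib>

  // Hypothetical stand-in for the radix tree; not mozjemalloc code.
  class Tree {
    void** mRoot = nullptr;
  public:
    // Replaces a fallible `static Tree* Create()` factory: the object
    // itself now lives in static storage, so only the internal root
    // allocation can fail.
    bool Init() {
      mRoot = static_cast<void**>(calloc(256, sizeof(void*)));
      return mRoot != nullptr;
    }
  };

  // Static storage duration: zero-initialized before first use, and no
  // out-of-memory path for the object itself.
  static Tree gTree;

  int main() {
    return gTree.Init() ? 0 : 1;
  }
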
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -672,17 +672,17 @@ class AddressRadixTree {
static const size_t kHeight = (Bits + kBitsPerLevel - 1) / kBitsPerLevel;
static_assert(kBitsAtLevel1 + (kHeight - 1) * kBitsPerLevel == Bits,
"AddressRadixTree parameters don't work out");
malloc_spinlock_t mLock;
void** mRoot;
public:
- static AddressRadixTree<Bits>* Create();
+ bool Init();
inline void* Get(void* aAddr);
// Returns whether the value was properly set
inline bool Set(void* aAddr, void* aValue);
inline bool Unset(void* aAddr)
{
@@ -1064,17 +1064,17 @@ struct ArenaTreeTrait
}
};
/********/
/*
* Chunks.
*/
-static AddressRadixTree<(SIZEOF_PTR << 3) - CHUNK_2POW_DEFAULT>* gChunkRTree;
+static AddressRadixTree<(SIZEOF_PTR << 3) - CHUNK_2POW_DEFAULT> gChunkRTree;
/* Protects chunk-related data structures. */
static malloc_mutex_t chunks_mtx;
/*
* Trees of chunks that were previously allocated (trees differ only in node
* ordering). These are used when allocating chunks, in an attempt to re-use
* address space. Depending on function, different tree orderings are needed,
@@ -1745,35 +1745,25 @@ pages_copy(void *dest, const void *src,
MOZ_ASSERT((void *)((uintptr_t)src & ~pagesize_mask) == src);
vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
(vm_address_t)dest);
}
#endif
template <size_t Bits>
-AddressRadixTree<Bits>*
-AddressRadixTree<Bits>::Create()
+bool
+AddressRadixTree<Bits>::Init()
{
- AddressRadixTree<Bits>* ret;
-
- ret = (AddressRadixTree<Bits>*)base_calloc(1, sizeof(AddressRadixTree<Bits>));
- if (!ret) {
- return nullptr;
- }
-
- malloc_spin_init(&ret->mLock);
-
- ret->mRoot = (void**)base_calloc(1 << kBitsAtLevel1, sizeof(void*));
- if (!ret->mRoot) {
- // We leak the rtree here, since there's no generic base deallocation.
- return nullptr;
- }
-
- return ret;
+ malloc_spin_init(&mLock);
+
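+ // The tree object itself now lives in static storage; only the
+ // first-level node array still needs a zeroed base allocation.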
+ mRoot = (void**)base_calloc(1 << kBitsAtLevel1, sizeof(void*));
+ return mRoot != nullptr;
}
template <size_t Bits>
void**
AddressRadixTree<Bits>::GetSlot(void* aKey, bool aCreate)
{
uintptr_t key = reinterpret_cast<uintptr_t>(aKey);
uintptr_t subkey;
@@ -2141,17 +2129,17 @@ chunk_alloc(size_t size, size_t alignmen
goto RETURN;
}
/* All strategies for allocation failed. */
ret = nullptr;
RETURN:
if (ret && base == false) {
- if (!gChunkRTree->Set(ret, ret)) {
+ if (!gChunkRTree.Set(ret, ret)) {
chunk_dealloc(ret, size, UNKNOWN_CHUNK);
return nullptr;
}
}
MOZ_ASSERT(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
}
@@ -2273,17 +2261,17 @@ static void
chunk_dealloc(void *chunk, size_t size, ChunkType type)
{
MOZ_ASSERT(chunk);
MOZ_ASSERT(CHUNK_ADDR2BASE(chunk) == chunk);
MOZ_ASSERT(size != 0);
MOZ_ASSERT((size & chunksize_mask) == 0);
- gChunkRTree->Unset(chunk);
+ gChunkRTree.Unset(chunk);
if (CAN_RECYCLE(size)) {
size_t recycled_so_far = load_acquire_z(&recycled_size);
// In case some race condition put us above the limit.
if (recycled_so_far < recycle_limit) {
size_t recycle_remaining = recycle_limit - recycled_so_far;
size_t to_recycle;
if (size > recycle_remaining) {
@@ -3475,17 +3463,17 @@ isalloc_validate(const void* ptr)
return 0;
}
arena_chunk_t* chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(ptr);
if (!chunk) {
return 0;
}
- if (!gChunkRTree->Get(chunk)) {
+ if (!gChunkRTree.Get(chunk)) {
return 0;
}
if (chunk != ptr) {
MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
return arena_salloc(ptr);
} else {
size_t ret;
@@ -3563,17 +3551,17 @@ MozJemalloc::jemalloc_ptr_info(const voi
*aInfo = { TagLiveHuge, node->addr, node->size };
}
malloc_mutex_unlock(&huge_mtx);
if (node) {
return;
}
// It's not a huge allocation. Check if we have a known chunk.
- if (!gChunkRTree->Get(chunk)) {
+ if (!gChunkRTree.Get(chunk)) {
*aInfo = { TagUnknown, nullptr, 0 };
return;
}
MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
// Get the page number within the chunk.
size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> pagesize_2pow);
@@ -4503,18 +4491,17 @@ MALLOC_OUT:
* reset to the default value for the main arena. */
gMainArena->mMaxDirty = opt_dirty_max;
/*
* Assign the initial arena to the initial thread.
*/
thread_arena.set(gMainArena);
- gChunkRTree = AddressRadixTree<(SIZEOF_PTR << 3) - CHUNK_2POW_DEFAULT>::Create();
- if (!gChunkRTree) {
+ if (!gChunkRTree.Init()) {
return true;
}
malloc_initialized = true;
#if !defined(XP_WIN) && !defined(XP_DARWIN)
/* Prevent potential deadlock on malloc locks after fork. */
pthread_atfork(_malloc_prefork, _malloc_postfork_parent, _malloc_postfork_child);