--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -641,31 +641,45 @@ struct ExtentTreeBoundsTrait : public Ex
}
};
/******************************************************************************/
/*
* Radix tree data structures.
*/
-/*
- * Size of each radix tree node (must be a power of 2). This impacts tree
- * depth.
- */
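+// A radix tree mapping addresses (keys) to void* values. Each level of the
+// tree consumes a few high-order bits of the key; leaf nodes store values.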
+class AddressRadixTree {
+ // Size of each radix tree node (must be a power of 2).
+ // This impacts tree depth.
#if (SIZEOF_PTR == 4)
-# define MALLOC_RTREE_NODESIZE (1U << 14)
+ static const size_t kNodeSize = 1U << 14;
#else
-# define MALLOC_RTREE_NODESIZE CACHELINE
+ static const size_t kNodeSize = CACHELINE;
#endif
-struct malloc_rtree_t {
- malloc_spinlock_t lock;
- void **root;
- unsigned height;
- unsigned level2bits[1]; /* Dynamically sized. */
+ malloc_spinlock_t mLock;
+ void** mRoot;
+ unsigned mHeight;
+ unsigned mLevel2Bits[1]; // Dynamically sized.
+
+public:
+ static AddressRadixTree* Create(unsigned aBits);
+
+ inline void* Get(void* aKey);
+
+ // Returns whether the value was properly set.
+ inline bool Set(void* aKey, void* aValue);
+
+ inline bool Unset(void* aKey)
+ {
+ return Set(aKey, nullptr);
+ }
+
+private:
+ inline void** GetSlot(void* aKey, bool aCreate = false);
};
/******************************************************************************/
/*
* Arena data structures.
*/
struct arena_t;
@@ -1034,17 +1048,17 @@ struct ArenaTreeTrait
}
};
/********/
/*
* Chunks.
*/
-static malloc_rtree_t *chunk_rtree;
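+// The tree of chunks jemalloc knows about: chunk_alloc() maps each chunk
+// address to itself, so Get() returning non-null means the chunk is ours.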
+static AddressRadixTree* gChunkRTree;
/* Protects chunk-related data structures. */
static malloc_mutex_t chunks_mtx;
/*
* Trees of chunks that were previously allocated (trees differ only in node
* ordering). These are used when allocating chunks, in an attempt to re-use
* address space. Depending on function, different tree orderings are needed,
@@ -1714,136 +1728,138 @@ pages_copy(void *dest, const void *src,
MOZ_ASSERT(n >= VM_COPY_MIN);
MOZ_ASSERT((void *)((uintptr_t)src & ~pagesize_mask) == src);
vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
(vm_address_t)dest);
}
#endif
-static inline malloc_rtree_t *
-malloc_rtree_new(unsigned bits)
+AddressRadixTree*
+AddressRadixTree::Create(unsigned aBits)
{
- malloc_rtree_t *ret;
- unsigned bits_per_level, height, i;
-
- bits_per_level = ffs(pow2_ceil((MALLOC_RTREE_NODESIZE /
- sizeof(void *)))) - 1;
- height = bits / bits_per_level;
- if (height * bits_per_level != bits)
- height++;
- MOZ_DIAGNOSTIC_ASSERT(height * bits_per_level >= bits);
-
- ret = (malloc_rtree_t*)base_calloc(1, sizeof(malloc_rtree_t) +
- (sizeof(unsigned) * (height - 1)));
- if (!ret)
- return nullptr;
-
- malloc_spin_init(&ret->lock);
- ret->height = height;
- if (bits_per_level * height > bits)
- ret->level2bits[0] = bits % bits_per_level;
- else
- ret->level2bits[0] = bits_per_level;
- for (i = 1; i < height; i++)
- ret->level2bits[i] = bits_per_level;
-
- ret->root = (void**)base_calloc(1, sizeof(void *) << ret->level2bits[0]);
- if (!ret->root) {
- /*
- * We leak the rtree here, since there's no generic base
- * deallocation.
- */
- return nullptr;
- }
-
- return (ret);
+ AddressRadixTree* ret;
+ unsigned bits_per_level, height, i;
+
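+ // Each node holds kNodeSize / sizeof(void*) pointers, so each level of
+ // the tree consumes log2 of that many bits of the key; the height is
+ // however many levels it takes to consume all aBits bits.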
+ bits_per_level = ffs(pow2_ceil((AddressRadixTree::kNodeSize /
+ sizeof(void*)))) - 1;
+ height = aBits / bits_per_level;
+ if (height * bits_per_level != aBits) {
+ height++;
+ }
+ MOZ_DIAGNOSTIC_ASSERT(height * bits_per_level >= aBits);
+
+ ret = (AddressRadixTree*)base_calloc(1, sizeof(AddressRadixTree) +
+ (sizeof(unsigned) * (height - 1)));
+ if (!ret) {
+ return nullptr;
+ }
+
+ malloc_spin_init(&ret->mLock);
+ ret->mHeight = height;
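+ // When aBits doesn't divide evenly by bits_per_level, give the root
+ // level the remainder so all deeper levels use the full bits_per_level.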
+ if (bits_per_level * height > aBits) {
+ ret->mLevel2Bits[0] = aBits % bits_per_level;
+ } else {
+ ret->mLevel2Bits[0] = bits_per_level;
+ }
+ for (i = 1; i < height; i++) {
+ ret->mLevel2Bits[i] = bits_per_level;
+ }
+
+ ret->mRoot = (void**)base_calloc(1 << ret->mLevel2Bits[0], sizeof(void*));
+ if (!ret->mRoot) {
+ // We leak the rtree here, since there's no generic base deallocation.
+ return nullptr;
+ }
+
+ return ret;
}
-static inline void**
-malloc_rtree_get_slot(malloc_rtree_t* aTree, uintptr_t aKey, bool aCreate = false)
+void**
+AddressRadixTree::GetSlot(void* aKey, bool aCreate)
{
+ uintptr_t key = reinterpret_cast<uintptr_t>(aKey);
uintptr_t subkey;
unsigned i, lshift, height, bits;
void** node;
void** child;
- for (i = lshift = 0, height = aTree->height, node = aTree->root;
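+ // Walk down from the root: at each level, the next mLevel2Bits[i]
+ // high-order bits of the key select a slot in the current node.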
+ for (i = lshift = 0, height = mHeight, node = mRoot;
i < height - 1;
i++, lshift += bits, node = child) {
- bits = aTree->level2bits[i];
- subkey = (aKey << lshift) >> ((SIZEOF_PTR << 3) - bits);
+ bits = mLevel2Bits[i];
+ subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
child = (void**) node[subkey];
if (!child && aCreate) {
- child = (void**) base_calloc(1 << aTree->level2bits[i + 1], sizeof(void*));
+ child = (void**) base_calloc(1 << mLevel2Bits[i + 1], sizeof(void*));
if (child) {
node[subkey] = child;
}
}
if (!child) {
return nullptr;
}
}
/*
* node is a leaf, so it contains values rather than node
* pointers.
*/
- bits = aTree->level2bits[i];
- subkey = (aKey << lshift) >> ((SIZEOF_PTR << 3) - bits);
+ bits = mLevel2Bits[i];
+ subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
return &node[subkey];
}
-static inline void*
-malloc_rtree_get(malloc_rtree_t* aTree, uintptr_t aKey)
+void*
+AddressRadixTree::Get(void* aKey)
{
void* ret = nullptr;
- void** slot = malloc_rtree_get_slot(aTree, aKey);
+ void** slot = GetSlot(aKey);
if (slot) {
ret = *slot;
}
#ifdef MOZ_DEBUG
- malloc_spin_lock(&aTree->lock);
+ malloc_spin_lock(&mLock);
/*
* Suppose that it were possible for a jemalloc-allocated chunk to be
* munmap()ped, followed by a different allocator in another thread re-using
* overlapping virtual memory, all without invalidating the cached rtree
* value. The result would be a false positive (the rtree would claim that
* jemalloc owns memory that it had actually discarded). I don't think this
* scenario is possible, but the following assertion is a prudent sanity
* check.
*/
if (!slot) {
// In case a slot has been created in the meantime.
- slot = malloc_rtree_get_slot(aTree, aKey);
+ slot = GetSlot(aKey);
}
if (slot) {
// The malloc_spin_lock call above should act as a memory barrier, forcing
// the compiler to emit a new read instruction for *slot.
MOZ_ASSERT(ret == *slot);
} else {
MOZ_ASSERT(ret == nullptr);
}
- malloc_spin_unlock(&aTree->lock);
+ malloc_spin_unlock(&mLock);
#endif
return ret;
}
-static inline bool
-malloc_rtree_set(malloc_rtree_t *aTree, uintptr_t aKey, void* aValue)
+bool
+AddressRadixTree::Set(void* aKey, void* aValue)
{
- malloc_spin_lock(&aTree->lock);
- void** slot = malloc_rtree_get_slot(aTree, aKey, /* create */ true);
+ malloc_spin_lock(&mLock);
+ void** slot = GetSlot(aKey, /* create */ true);
if (slot) {
*slot = aValue;
}
- malloc_spin_unlock(&aTree->lock);
- return !slot;
+ malloc_spin_unlock(&mLock);
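+ // A null slot here means GetSlot failed to allocate an intermediate
+ // node; report that as failure so callers can back out (see chunk_alloc).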
+ return slot;
}
/* pages_trim, chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked
* from upstream jemalloc 3.4.1 to fix Mozilla bug 956501. */
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
((size_t)((uintptr_t)(a) & (alignment - 1)))
@@ -2124,17 +2140,17 @@ chunk_alloc(size_t size, size_t alignmen
goto RETURN;
}
/* All strategies for allocation failed. */
ret = nullptr;
RETURN:
if (ret && base == false) {
- if (malloc_rtree_set(chunk_rtree, (uintptr_t)ret, ret)) {
+ if (!gChunkRTree->Set(ret, ret)) {
chunk_dealloc(ret, size, UNKNOWN_CHUNK);
return nullptr;
}
}
MOZ_ASSERT(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
}
@@ -2256,17 +2272,17 @@ static void
chunk_dealloc(void *chunk, size_t size, ChunkType type)
{
MOZ_ASSERT(chunk);
MOZ_ASSERT(CHUNK_ADDR2BASE(chunk) == chunk);
MOZ_ASSERT(size != 0);
MOZ_ASSERT((size & chunksize_mask) == 0);
- malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, nullptr);
+ gChunkRTree->Unset(chunk);
if (CAN_RECYCLE(size)) {
size_t recycled_so_far = load_acquire_z(&recycled_size);
// In case some race condition put us above the limit.
if (recycled_so_far < recycle_limit) {
size_t recycle_remaining = recycle_limit - recycled_so_far;
size_t to_recycle;
if (size > recycle_remaining) {
@@ -3458,17 +3474,17 @@ isalloc_validate(const void* ptr)
return 0;
}
arena_chunk_t* chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(ptr);
if (!chunk) {
return 0;
}
- if (!malloc_rtree_get(chunk_rtree, (uintptr_t)chunk)) {
+ if (!gChunkRTree->Get(chunk)) {
return 0;
}
if (chunk != ptr) {
MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
return arena_salloc(ptr);
} else {
size_t ret;
@@ -3528,35 +3544,35 @@ MozJemalloc::jemalloc_ptr_info(const voi
arena_chunk_t* chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(aPtr);
// Is the pointer null, or within one chunk's size of null?
if (!chunk) {
*aInfo = { TagUnknown, nullptr, 0 };
return;
}
- // Look for huge allocations before looking for |chunk| in chunk_rtree.
- // This is necessary because |chunk| won't be in chunk_rtree if it's
+ // Look for huge allocations before looking for |chunk| in gChunkRTree.
+ // This is necessary because |chunk| won't be in gChunkRTree if it's
// the second or subsequent chunk in a huge allocation.
extent_node_t* node;
extent_node_t key;
malloc_mutex_lock(&huge_mtx);
key.addr = const_cast<void*>(aPtr);
node = reinterpret_cast<
RedBlackTree<extent_node_t, ExtentTreeBoundsTrait>*>(&huge)->Search(&key);
if (node) {
*aInfo = { TagLiveHuge, node->addr, node->size };
}
malloc_mutex_unlock(&huge_mtx);
if (node) {
return;
}
// It's not a huge allocation. Check if we have a known chunk.
- if (!malloc_rtree_get(chunk_rtree, (uintptr_t)chunk)) {
+ if (!gChunkRTree->Get(chunk)) {
*aInfo = { TagUnknown, nullptr, 0 };
return;
}
MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
// Get the page number within the chunk.
size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> pagesize_2pow);
@@ -4486,18 +4502,18 @@ MALLOC_OUT:
* reset to the default value for the main arena. */
gMainArena->mMaxDirty = opt_dirty_max;
/*
* Assign the initial arena to the initial thread.
*/
thread_arena.set(gMainArena);
- chunk_rtree = malloc_rtree_new((SIZEOF_PTR << 3) - CHUNK_2POW_DEFAULT);
- if (!chunk_rtree) {
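+ // Chunks are chunksize-aligned, so the low CHUNK_2POW_DEFAULT bits of a
+ // chunk address are always zero; the tree only needs to key on the
+ // remaining high bits.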
+ gChunkRTree = AddressRadixTree::Create((SIZEOF_PTR << 3) - CHUNK_2POW_DEFAULT);
+ if (!gChunkRTree) {
return true;
}
malloc_initialized = true;
#if !defined(XP_WIN) && !defined(XP_DARWIN)
/* Prevent potential deadlock on malloc locks after fork. */
pthread_atfork(_malloc_prefork, _malloc_postfork_parent, _malloc_postfork_child);