--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -107,16 +107,17 @@
*******************************************************************************
*/
#include "mozmemory_wrap.h"
#include "mozjemalloc.h"
#include "mozilla/Sprintf.h"
#include "mozilla/Likely.h"
#include "mozilla/DoublyLinkedList.h"
+#include "mozilla/GuardObjects.h"
/*
* On Linux, we use madvise(MADV_DONTNEED) to release memory back to the
* operating system. If we release 1MB of live pages with MADV_DONTNEED, our
* RSS will decrease by 1MB (almost) immediately.
*
* On Mac, we use madvise(MADV_FREE). Unlike MADV_DONTNEED on Linux, MADV_FREE
* on Mac doesn't cause the OS to release the specified pages immediately; the
@@ -517,16 +518,32 @@ struct Mutex
inline bool Init();
inline void Lock();
inline void Unlock();
};
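+/* RAII helper (a minimal analogue of mozilla::MutexAutoLock): locks the given
+ * Mutex on construction and unlocks it when the enclosing scope exits. The
+ * MOZ_RAII/guard-object annotations assert, in debug builds, that instances
+ * are only ever created as scoped stack variables. */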
+struct MOZ_RAII MutexAutoLock
+{
+ explicit MutexAutoLock(Mutex& aMutex MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ : mMutex(aMutex)
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ mMutex.Lock();
+ }
+
+ ~MutexAutoLock() { mMutex.Unlock(); }
+
+private:
+ MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
+ Mutex& mMutex;
+};
+
/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;
#if defined(XP_WIN)
/* No init lock for Windows. */
#elif defined(XP_DARWIN)
static Mutex gInitLock = { OS_SPINLOCK_INIT };
#elif defined(XP_LINUX) && !defined(ANDROID)
@@ -1166,19 +1183,19 @@ static size_t opt_dirty_max = DIRTY_MAX_
* Begin forward declarations.
*/
static void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zeroed=nullptr);
static void chunk_dealloc(void *chunk, size_t size, ChunkType chunk_type);
static void chunk_ensure_zero(void* ptr, size_t size, bool zeroed);
static arena_t *arenas_extend();
static void *huge_malloc(size_t size, bool zero);
-static void *huge_palloc(size_t size, size_t alignment, bool zero);
-static void *huge_ralloc(void *ptr, size_t size, size_t oldsize);
-static void huge_dalloc(void *ptr);
+static void* huge_palloc(size_t aSize, size_t aAlignment, bool aZero);
+static void* huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize);
+static void huge_dalloc(void* aPtr);
#ifdef XP_WIN
extern "C"
#else
static
#endif
bool malloc_init_hard(void);
#ifdef XP_DARWIN
@@ -1445,52 +1462,49 @@ base_pages_alloc(size_t minsize)
pages_decommit(base_next_decommitted, csize - pminsize);
# endif
base_mapped += csize;
base_committed += pminsize;
return (false);
}
-static void *
-base_alloc(size_t size)
+static void*
+base_alloc(size_t aSize)
{
- void *ret;
- size_t csize;
-
- /* Round size up to nearest multiple of the cacheline size. */
- csize = CACHELINE_CEILING(size);
-
- base_mtx.Lock();
- /* Make sure there's enough space for the allocation. */
- if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
- if (base_pages_alloc(csize)) {
- base_mtx.Unlock();
- return nullptr;
- }
- }
- /* Allocate. */
- ret = base_next_addr;
- base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
- /* Make sure enough pages are committed for the new allocation. */
- if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
- void *pbase_next_addr =
- (void *)(PAGE_CEILING((uintptr_t)base_next_addr));
+ void* ret;
+ size_t csize;
+
+ /* Round size up to nearest multiple of the cacheline size. */
+ csize = CACHELINE_CEILING(aSize);
+
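+ /* base_mtx is held for the remainder of the function; the early return
+  * below releases it automatically through MutexAutoLock's destructor. */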
+ MutexAutoLock lock(base_mtx);
+ /* Make sure there's enough space for the allocation. */
+ if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
+ if (base_pages_alloc(csize)) {
+ return nullptr;
+ }
+ }
+ /* Allocate. */
+ ret = base_next_addr;
+ base_next_addr = (void*)((uintptr_t)base_next_addr + csize);
+ /* Make sure enough pages are committed for the new allocation. */
+ if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
+ void* pbase_next_addr = (void*)(PAGE_CEILING((uintptr_t)base_next_addr));
# ifdef MALLOC_DECOMMIT
- pages_commit(base_next_decommitted, (uintptr_t)pbase_next_addr -
- (uintptr_t)base_next_decommitted);
+ pages_commit(base_next_decommitted, (uintptr_t)pbase_next_addr -
+ (uintptr_t)base_next_decommitted);
# endif
- base_committed += (uintptr_t)pbase_next_addr -
- (uintptr_t)base_next_decommitted;
- base_next_decommitted = pbase_next_addr;
- }
- base_mtx.Unlock();
-
- return (ret);
+ base_committed += (uintptr_t)pbase_next_addr -
+ (uintptr_t)base_next_decommitted;
+ base_next_decommitted = pbase_next_addr;
+ }
+
+ return ret;
}
static void *
base_calloc(size_t number, size_t size)
{
void *ret;
ret = base_alloc(number * size);
@@ -1513,23 +1527,21 @@ base_node_alloc(void)
base_mtx.Unlock();
ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
}
return (ret);
}
static void
-base_node_dealloc(extent_node_t *node)
+base_node_dealloc(extent_node_t* aNode)
{
-
- base_mtx.Lock();
- *(extent_node_t **)node = base_nodes;
- base_nodes = node;
- base_mtx.Unlock();
+ MutexAutoLock lock(base_mtx);
+ *(extent_node_t**)aNode = base_nodes;
+ base_nodes = aNode;
}
/*
* End Utility functions/macros.
*/
/******************************************************************************/
/*
* Begin chunk management functions.
@@ -1737,52 +1749,50 @@ AddressRadixTree<Bits>::Get(void* aKey)
void* ret = nullptr;
void** slot = GetSlot(aKey);
if (slot) {
ret = *slot;
}
#ifdef MOZ_DEBUG
- mLock.Lock();
+ MutexAutoLock lock(mLock);
/*
* Suppose that it were possible for a jemalloc-allocated chunk to be
* munmap()ped, followed by a different allocator in another thread re-using
* overlapping virtual memory, all without invalidating the cached rtree
* value. The result would be a false positive (the rtree would claim that
* jemalloc owns memory that it had actually discarded). I don't think this
* scenario is possible, but the following assertion is a prudent sanity
* check.
*/
if (!slot) {
// In case a slot has been created in the meantime.
slot = GetSlot(aKey);
}
if (slot) {
- // The Lock() call above should act as a memory barrier, forcing
+ // The MutexAutoLock above should act as a memory barrier, forcing
// the compiler to emit a new read instruction for *slot.
MOZ_ASSERT(ret == *slot);
} else {
MOZ_ASSERT(ret == nullptr);
}
- mLock.Unlock();
#endif
return ret;
}
template <size_t Bits>
bool
AddressRadixTree<Bits>::Set(void* aKey, void* aValue)
{
- mLock.Lock();
+ MutexAutoLock lock(mLock);
void** slot = GetSlot(aKey, /* create */ true);
if (slot) {
*slot = aValue;
}
- mLock.Unlock();
return slot;
}
/* pages_trim, chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked
* from upstream jemalloc 3.4.1 to fix Mozilla bug 956501. */
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
@@ -3124,31 +3134,31 @@ arena_t::MallocSmall(size_t aSize, bool
} else {
/* Sub-page. */
aSize = pow2_ceil(aSize);
bin = &mBins[ntbins + nqbins
+ (ffs((int)(aSize >> SMALL_MAX_2POW_DEFAULT)) - 2)];
}
MOZ_DIAGNOSTIC_ASSERT(aSize == bin->reg_size);
- mLock.Lock();
- if ((run = bin->runcur) && run->nfree > 0) {
- ret = MallocBinEasy(bin, run);
- } else {
- ret = MallocBinHard(bin);
+ {
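+ /* The arena lock only needs to cover the run/bin manipulation and the
+  * stats update; junk/zero filling of the returned region below is done
+  * outside the lock. */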
+ MutexAutoLock lock(mLock);
+ if ((run = bin->runcur) && run->nfree > 0) {
+ ret = MallocBinEasy(bin, run);
+ } else {
+ ret = MallocBinHard(bin);
+ }
+
+ if (!ret) {
+ return nullptr;
+ }
+
+ mStats.allocated_small += aSize;
}
- if (!ret) {
- mLock.Unlock();
- return nullptr;
- }
-
- mStats.allocated_small += aSize;
- mLock.Unlock();
-
if (aZero == false) {
if (opt_junk) {
memset(ret, kAllocJunk, aSize);
} else if (opt_zero) {
memset(ret, 0, aSize);
}
} else
memset(ret, 0, aSize);
@@ -3158,24 +3168,25 @@ arena_t::MallocSmall(size_t aSize, bool
void*
arena_t::MallocLarge(size_t aSize, bool aZero)
{
void* ret;
/* Large allocation. */
aSize = PAGE_CEILING(aSize);
- mLock.Lock();
- ret = AllocRun(nullptr, aSize, true, aZero);
- if (!ret) {
- mLock.Unlock();
- return nullptr;
+
+ {
+ MutexAutoLock lock(mLock);
+ ret = AllocRun(nullptr, aSize, true, aZero);
+ if (!ret) {
+ return nullptr;
+ }
+ mStats.allocated_large += aSize;
}
- mStats.allocated_large += aSize;
- mLock.Unlock();
if (aZero == false) {
if (opt_junk) {
memset(ret, kAllocJunk, aSize);
} else if (opt_zero) {
memset(ret, 0, aSize);
}
}
@@ -3212,50 +3223,50 @@ arena_t::Palloc(size_t aAlignment, size_
{
void* ret;
size_t offset;
arena_chunk_t* chunk;
MOZ_ASSERT((aSize & pagesize_mask) == 0);
MOZ_ASSERT((aAlignment & pagesize_mask) == 0);
- mLock.Lock();
- ret = AllocRun(nullptr, aAllocSize, true, false);
- if (!ret) {
- mLock.Unlock();
- return nullptr;
+ {
+ MutexAutoLock lock(mLock);
+ ret = AllocRun(nullptr, aAllocSize, true, false);
+ if (!ret) {
+ return nullptr;
+ }
+
+ chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(ret);
+
+ offset = uintptr_t(ret) & (aAlignment - 1);
+ MOZ_ASSERT((offset & pagesize_mask) == 0);
+ MOZ_ASSERT(offset < aAllocSize);
+ if (offset == 0) {
+ TrimRunTail(chunk, (arena_run_t*)ret, aAllocSize, aSize, false);
+ } else {
+ size_t leadsize, trailsize;
+
+ leadsize = aAlignment - offset;
+ if (leadsize > 0) {
+ TrimRunHead(chunk, (arena_run_t*)ret, aAllocSize, aAllocSize - leadsize);
+ ret = (void*)(uintptr_t(ret) + leadsize);
+ }
+
+ trailsize = aAllocSize - leadsize - aSize;
+ if (trailsize != 0) {
+ /* Trim trailing space. */
+ MOZ_ASSERT(trailsize < aAllocSize);
+ TrimRunTail(chunk, (arena_run_t*)ret, aSize + trailsize, aSize, false);
+ }
+ }
+
+ mStats.allocated_large += aSize;
}
- chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(ret);
-
- offset = uintptr_t(ret) & (aAlignment - 1);
- MOZ_ASSERT((offset & pagesize_mask) == 0);
- MOZ_ASSERT(offset < aAllocSize);
- if (offset == 0) {
- TrimRunTail(chunk, (arena_run_t*)ret, aAllocSize, aSize, false);
- } else {
- size_t leadsize, trailsize;
-
- leadsize = aAlignment - offset;
- if (leadsize > 0) {
- TrimRunHead(chunk, (arena_run_t*)ret, aAllocSize, aAllocSize - leadsize);
- ret = (void*)(uintptr_t(ret) + leadsize);
- }
-
- trailsize = aAllocSize - leadsize - aSize;
- if (trailsize != 0) {
- /* Trim trailing space. */
- MOZ_ASSERT(trailsize < aAllocSize);
- TrimRunTail(chunk, (arena_run_t*)ret, aSize + trailsize, aSize, false);
- }
- }
-
- mStats.allocated_large += aSize;
- mLock.Unlock();
-
if (opt_junk) {
memset(ret, kAllocJunk, aSize);
} else if (opt_zero) {
memset(ret, 0, aSize);
}
return ret;
}
@@ -3386,85 +3397,74 @@ arena_salloc(const void *ptr)
* Validate ptr before assuming that it points to an allocation. Currently,
* the following validation is performed:
*
* + Check that ptr is not nullptr.
*
* + Check that ptr lies within a mapped chunk.
*/
static inline size_t
-isalloc_validate(const void* ptr)
+isalloc_validate(const void* aPtr)
{
/* If the allocator is not initialized, the pointer can't belong to it. */
if (malloc_initialized == false) {
return 0;
}
- arena_chunk_t* chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(ptr);
+ auto chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(aPtr);
if (!chunk) {
return 0;
}
if (!gChunkRTree.Get(chunk)) {
return 0;
}
- if (chunk != ptr) {
+ if (chunk != aPtr) {
MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
- return arena_salloc(ptr);
- } else {
- size_t ret;
- extent_node_t* node;
- extent_node_t key;
-
- /* Chunk. */
- key.addr = (void*)chunk;
- huge_mtx.Lock();
- node = huge.Search(&key);
- if (node)
- ret = node->size;
- else
- ret = 0;
- huge_mtx.Unlock();
- return ret;
+ return arena_salloc(aPtr);
}
+
+ extent_node_t key;
+
+ /* Chunk. */
+ key.addr = (void*)chunk;
+ MutexAutoLock lock(huge_mtx);
+ extent_node_t* node = huge.Search(&key);
+ if (node) {
+ return node->size;
+ }
+ return 0;
}
static inline size_t
-isalloc(const void *ptr)
+isalloc(const void* aPtr)
{
- size_t ret;
- arena_chunk_t *chunk;
-
- MOZ_ASSERT(ptr);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr) {
- /* Region. */
- MOZ_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
-
- ret = arena_salloc(ptr);
- } else {
- extent_node_t *node, key;
-
- /* Chunk (huge allocation). */
-
- huge_mtx.Lock();
-
- /* Extract from tree of huge allocations. */
- key.addr = const_cast<void*>(ptr);
- node = huge.Search(&key);
- MOZ_DIAGNOSTIC_ASSERT(node);
-
- ret = node->size;
-
- huge_mtx.Unlock();
- }
-
- return (ret);
+ MOZ_ASSERT(aPtr);
+
+ auto chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(aPtr);
+ if (chunk != aPtr) {
+ /* Region. */
+ MOZ_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
+
+ return arena_salloc(aPtr);
+ }
+
+ extent_node_t key;
+
+ /* Chunk (huge allocation). */
+
+ MutexAutoLock lock(huge_mtx);
+
+ /* Extract from tree of huge allocations. */
+ key.addr = const_cast<void*>(aPtr);
+ extent_node_t* node = huge.Search(&key);
+ MOZ_DIAGNOSTIC_ASSERT(node);
+
+ return node->size;
}
template<> inline void
MozJemalloc::jemalloc_ptr_info(const void* aPtr, jemalloc_ptr_info_t* aInfo)
{
arena_chunk_t* chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(aPtr);
// Is the pointer null, or within one chunk's size of null?
@@ -3473,26 +3473,25 @@ MozJemalloc::jemalloc_ptr_info(const voi
return;
}
// Look for huge allocations before looking for |chunk| in gChunkRTree.
// This is necessary because |chunk| won't be in gChunkRTree if it's
// the second or subsequent chunk in a huge allocation.
extent_node_t* node;
extent_node_t key;
- huge_mtx.Lock();
- key.addr = const_cast<void*>(aPtr);
- node = reinterpret_cast<
- RedBlackTree<extent_node_t, ExtentTreeBoundsTrait>*>(&huge)->Search(&key);
- if (node) {
- *aInfo = { TagLiveHuge, node->addr, node->size };
- }
- huge_mtx.Unlock();
- if (node) {
- return;
+ {
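+ /* Only the huge-allocation tree lookup needs huge_mtx; the gChunkRTree
+  * check further down is performed outside the lock. */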
+ MutexAutoLock lock(huge_mtx);
+ key.addr = const_cast<void*>(aPtr);
+ node = reinterpret_cast<
+ RedBlackTree<extent_node_t, ExtentTreeBoundsTrait>*>(&huge)->Search(&key);
+ if (node) {
+ *aInfo = { TagLiveHuge, node->addr, node->size };
+ return;
+ }
}
// It's not a huge allocation. Check if we have a known chunk.
if (!gChunkRTree.Get(chunk)) {
*aInfo = { TagUnknown, nullptr, 0 };
return;
}
@@ -3669,44 +3668,38 @@ arena_t::DallocLarge(arena_chunk_t* aChu
memset(aPtr, kAllocPoison, size);
mStats.allocated_large -= size;
DallocRun((arena_run_t*)aPtr, true);
}
static inline void
-arena_dalloc(void *ptr, size_t offset)
+arena_dalloc(void* aPtr, size_t aOffset)
{
- arena_chunk_t *chunk;
- arena_t *arena;
- size_t pageind;
- arena_chunk_map_t *mapelm;
-
- MOZ_ASSERT(ptr);
- MOZ_ASSERT(offset != 0);
- MOZ_ASSERT(CHUNK_ADDR2OFFSET(ptr) == offset);
-
- chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
- arena = chunk->arena;
- MOZ_ASSERT(arena);
- MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
-
- arena->mLock.Lock();
- pageind = offset >> pagesize_2pow;
- mapelm = &chunk->map[pageind];
- MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
- /* Small allocation. */
- arena->DallocSmall(chunk, ptr, mapelm);
- } else {
- /* Large allocation. */
- arena->DallocLarge(chunk, ptr);
- }
- arena->mLock.Unlock();
+ MOZ_ASSERT(aPtr);
+ MOZ_ASSERT(aOffset != 0);
+ MOZ_ASSERT(CHUNK_ADDR2OFFSET(aPtr) == aOffset);
+
+ auto chunk = (arena_chunk_t*) ((uintptr_t)aPtr - aOffset);
+ auto arena = chunk->arena;
+ MOZ_ASSERT(arena);
+ MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
+
+ MutexAutoLock lock(arena->mLock);
+ size_t pageind = aOffset >> pagesize_2pow;
+ arena_chunk_map_t* mapelm = &chunk->map[pageind];
+ MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
+ if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
+ /* Small allocation. */
+ arena->DallocSmall(chunk, aPtr, mapelm);
+ } else {
+ /* Large allocation. */
+ arena->DallocLarge(chunk, aPtr);
+ }
}
static inline void
idalloc(void *ptr)
{
size_t offset;
MOZ_ASSERT(ptr);
@@ -3723,30 +3716,29 @@ arena_t::RallocShrinkLarge(arena_chunk_t
size_t aOldSize)
{
MOZ_ASSERT(aSize < aOldSize);
/*
* Shrink the run, and make trailing pages available for other
* allocations.
*/
- mLock.Lock();
+ MutexAutoLock lock(mLock);
TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
mStats.allocated_large -= aOldSize - aSize;
- mLock.Unlock();
}
bool
arena_t::RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
size_t aOldSize)
{
size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
size_t npages = aOldSize >> pagesize_2pow;
- mLock.Lock();
+ MutexAutoLock lock(mLock);
MOZ_DIAGNOSTIC_ASSERT(aOldSize == (aChunk->map[pageind].bits & ~pagesize_mask));
/* Try to extend the run. */
MOZ_ASSERT(aSize > aOldSize);
if (pageind + npages < chunk_npages && (aChunk->map[pageind+npages].bits
& CHUNK_MAP_ALLOCATED) == 0 && (aChunk->map[pageind+npages].bits &
~pagesize_mask) >= aSize - aOldSize) {
/*
@@ -3759,20 +3751,18 @@ arena_t::RallocGrowLarge(arena_chunk_t*
false);
aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
aChunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
mStats.allocated_large += aSize - aOldSize;
- mLock.Unlock();
return false;
}
- mLock.Unlock();
return true;
}
/*
* Try to resize a large allocation, in order to avoid copying. This will
* always fail if growing an object, and the following run is already in use.
*/
@@ -3994,23 +3984,22 @@ arenas_extend()
/* Allocate enough space for trailing bins. */
ret = (arena_t*)base_alloc(sizeof(arena_t) +
(sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
if (!ret || !ret->Init()) {
return arenas_fallback();
}
- arenas_lock.Lock();
+ MutexAutoLock lock(arenas_lock);
// TODO: Use random Ids.
ret->mId = narenas++;
gArenaTree.Insert(ret);
- arenas_lock.Unlock();
return ret;
}
/*
* End arena.
*/
/******************************************************************************/
/*
@@ -4018,212 +4007,210 @@ arenas_extend()
*/
static void *
huge_malloc(size_t size, bool zero)
{
return huge_palloc(size, chunksize, zero);
}
-static void *
-huge_palloc(size_t size, size_t alignment, bool zero)
+static void*
+huge_palloc(size_t aSize, size_t aAlignment, bool aZero)
{
- void *ret;
- size_t csize;
- size_t psize;
- extent_node_t *node;
- bool zeroed;
-
- /* Allocate one or more contiguous chunks for this request. */
-
- csize = CHUNK_CEILING(size);
- if (csize == 0) {
- /* size is large enough to cause size_t wrap-around. */
- return nullptr;
- }
-
- /* Allocate an extent node with which to track the chunk. */
- node = base_node_alloc();
- if (!node)
- return nullptr;
-
- ret = chunk_alloc(csize, alignment, false, &zeroed);
- if (!ret) {
- base_node_dealloc(node);
- return nullptr;
- }
- if (zero) {
- chunk_ensure_zero(ret, csize, zeroed);
- }
-
- /* Insert node into huge. */
- node->addr = ret;
- psize = PAGE_CEILING(size);
- node->size = psize;
-
- huge_mtx.Lock();
- huge.Insert(node);
- huge_nmalloc++;
-
- /* Although we allocated space for csize bytes, we indicate that we've
- * allocated only psize bytes.
- *
- * If DECOMMIT is defined, this is a reasonable thing to do, since
- * we'll explicitly decommit the bytes in excess of psize.
- *
- * If DECOMMIT is not defined, then we're relying on the OS to be lazy
- * about how it allocates physical pages to mappings. If we never
- * touch the pages in excess of psize, the OS won't allocate a physical
- * page, and we won't use more than psize bytes of physical memory.
- *
- * A correct program will only touch memory in excess of how much it
- * requested if it first calls malloc_usable_size and finds out how
- * much space it has to play with. But because we set node->size =
- * psize above, malloc_usable_size will return psize, not csize, and
- * the program will (hopefully) never touch bytes in excess of psize.
- * Thus those bytes won't take up space in physical memory, and we can
- * reasonably claim we never "allocated" them in the first place. */
- huge_allocated += psize;
- huge_mapped += csize;
- huge_mtx.Unlock();
+ void* ret;
+ size_t csize;
+ size_t psize;
+ extent_node_t* node;
+ bool zeroed;
+
+ /* Allocate one or more contiguous chunks for this request. */
+
+ csize = CHUNK_CEILING(aSize);
+ if (csize == 0) {
+ /* size is large enough to cause size_t wrap-around. */
+ return nullptr;
+ }
+
+ /* Allocate an extent node with which to track the chunk. */
+ node = base_node_alloc();
+ if (!node) {
+ return nullptr;
+ }
+
+ ret = chunk_alloc(csize, aAlignment, false, &zeroed);
+ if (!ret) {
+ base_node_dealloc(node);
+ return nullptr;
+ }
+ if (aZero) {
+ chunk_ensure_zero(ret, csize, zeroed);
+ }
+
+ /* Insert node into huge. */
+ node->addr = ret;
+ psize = PAGE_CEILING(aSize);
+ node->size = psize;
+
+ {
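+ /* Record the new chunk in the huge tree and update the counters under
+  * huge_mtx; the decommit and junk/zero filling below don't need it. */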
+ MutexAutoLock lock(huge_mtx);
+ huge.Insert(node);
+ huge_nmalloc++;
+
+ /* Although we allocated space for csize bytes, we indicate that we've
+ * allocated only psize bytes.
+ *
+ * If DECOMMIT is defined, this is a reasonable thing to do, since
+ * we'll explicitly decommit the bytes in excess of psize.
+ *
+ * If DECOMMIT is not defined, then we're relying on the OS to be lazy
+ * about how it allocates physical pages to mappings. If we never
+ * touch the pages in excess of psize, the OS won't allocate a physical
+ * page, and we won't use more than psize bytes of physical memory.
+ *
+ * A correct program will only touch memory in excess of how much it
+ * requested if it first calls malloc_usable_size and finds out how
+ * much space it has to play with. But because we set node->size =
+ * psize above, malloc_usable_size will return psize, not csize, and
+ * the program will (hopefully) never touch bytes in excess of psize.
+ * Thus those bytes won't take up space in physical memory, and we can
+ * reasonably claim we never "allocated" them in the first place. */
+ huge_allocated += psize;
+ huge_mapped += csize;
+ }
#ifdef MALLOC_DECOMMIT
- if (csize - psize > 0)
- pages_decommit((void *)((uintptr_t)ret + psize), csize - psize);
+ if (csize - psize > 0)
+ pages_decommit((void*)((uintptr_t)ret + psize), csize - psize);
#endif
- if (zero == false) {
- if (opt_junk)
+ if (aZero == false) {
+ if (opt_junk) {
# ifdef MALLOC_DECOMMIT
- memset(ret, kAllocJunk, psize);
+ memset(ret, kAllocJunk, psize);
# else
- memset(ret, kAllocJunk, csize);
+ memset(ret, kAllocJunk, csize);
# endif
- else if (opt_zero)
+ } else if (opt_zero) {
# ifdef MALLOC_DECOMMIT
- memset(ret, 0, psize);
+ memset(ret, 0, psize);
# else
- memset(ret, 0, csize);
+ memset(ret, 0, csize);
# endif
- }
-
- return (ret);
+ }
+ }
+
+ return ret;
}
-static void *
-huge_ralloc(void *ptr, size_t size, size_t oldsize)
+static void*
+huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize)
{
- void *ret;
- size_t copysize;
-
- /* Avoid moving the allocation if the size class would not change. */
-
- if (oldsize > arena_maxclass &&
- CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
- size_t psize = PAGE_CEILING(size);
- if (size < oldsize) {
- memset((void *)((uintptr_t)ptr + size), kAllocPoison, oldsize
- - size);
- }
+ void* ret;
+ size_t copysize;
+
+ /* Avoid moving the allocation if the size class would not change. */
+
+ if (aOldSize > arena_maxclass &&
+ CHUNK_CEILING(aSize) == CHUNK_CEILING(aOldSize)) {
+ size_t psize = PAGE_CEILING(aSize);
+ if (aSize < aOldSize) {
+ memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
+ }
#ifdef MALLOC_DECOMMIT
- if (psize < oldsize) {
- extent_node_t *node, key;
-
- pages_decommit((void *)((uintptr_t)ptr + psize),
- oldsize - psize);
-
- /* Update recorded size. */
- huge_mtx.Lock();
- key.addr = const_cast<void*>(ptr);
- node = huge.Search(&key);
- MOZ_ASSERT(node);
- MOZ_ASSERT(node->size == oldsize);
- huge_allocated -= oldsize - psize;
- /* No need to change huge_mapped, because we didn't
- * (un)map anything. */
- node->size = psize;
- huge_mtx.Unlock();
- } else if (psize > oldsize) {
- pages_commit((void *)((uintptr_t)ptr + oldsize),
- psize - oldsize);
- }
+ if (psize < aOldSize) {
+ extent_node_t key;
+
+ pages_decommit((void*)((uintptr_t)aPtr + psize), aOldSize - psize);
+
+ /* Update recorded size. */
+ MutexAutoLock lock(huge_mtx);
+ key.addr = const_cast<void*>(aPtr);
+ extent_node_t* node = huge.Search(&key);
+ MOZ_ASSERT(node);
+ MOZ_ASSERT(node->size == aOldSize);
+ huge_allocated -= aOldSize - psize;
+ /* No need to change huge_mapped, because we didn't (un)map anything. */
+ node->size = psize;
+ } else if (psize > aOldSize) {
+ pages_commit((void*)((uintptr_t)aPtr + aOldSize), psize - aOldSize);
+ }
#endif
- /* Although we don't have to commit or decommit anything if
- * DECOMMIT is not defined and the size class didn't change, we
- * do need to update the recorded size if the size increased,
- * so malloc_usable_size doesn't return a value smaller than
- * what was requested via realloc(). */
-
- if (psize > oldsize) {
- /* Update recorded size. */
- extent_node_t *node, key;
- huge_mtx.Lock();
- key.addr = const_cast<void*>(ptr);
- node = huge.Search(&key);
- MOZ_ASSERT(node);
- MOZ_ASSERT(node->size == oldsize);
- huge_allocated += psize - oldsize;
- /* No need to change huge_mapped, because we didn't
- * (un)map anything. */
- node->size = psize;
- huge_mtx.Unlock();
- }
-
- if (opt_zero && size > oldsize) {
- memset((void *)((uintptr_t)ptr + oldsize), 0, size
- - oldsize);
- }
- return (ptr);
- }
-
- /*
- * If we get here, then size and oldsize are different enough that we
- * need to use a different size class. In that case, fall back to
- * allocating new space and copying.
- */
- ret = huge_malloc(size, false);
- if (!ret)
- return nullptr;
-
- copysize = (size < oldsize) ? size : oldsize;
+ /* Although we don't have to commit or decommit anything if
+ * DECOMMIT is not defined and the size class didn't change, we
+ * do need to update the recorded size if the size increased,
+ * so malloc_usable_size doesn't return a value smaller than
+ * what was requested via realloc(). */
+ if (psize > aOldSize) {
+ /* Update recorded size. */
+ extent_node_t key;
+ MutexAutoLock lock(huge_mtx);
+ key.addr = const_cast<void*>(aPtr);
+ extent_node_t* node = huge.Search(&key);
+ MOZ_ASSERT(node);
+ MOZ_ASSERT(node->size == aOldSize);
+ huge_allocated += psize - aOldSize;
+ /* No need to change huge_mapped, because we didn't
+ * (un)map anything. */
+ node->size = psize;
+ }
+
+ if (opt_zero && aSize > aOldSize) {
+ memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
+ }
+ return aPtr;
+ }
+
+ /*
+ * If we get here, then aSize and aOldSize are different enough that we
+ * need to use a different size class. In that case, fall back to
+ * allocating new space and copying.
+ */
+ ret = huge_malloc(aSize, false);
+ if (!ret) {
+ return nullptr;
+ }
+
+ copysize = (aSize < aOldSize) ? aSize : aOldSize;
#ifdef VM_COPY_MIN
- if (copysize >= VM_COPY_MIN)
- pages_copy(ret, ptr, copysize);
- else
+ if (copysize >= VM_COPY_MIN) {
+ pages_copy(ret, aPtr, copysize);
+ } else
#endif
- memcpy(ret, ptr, copysize);
- idalloc(ptr);
- return (ret);
+ {
+ memcpy(ret, aPtr, copysize);
+ }
+ idalloc(aPtr);
+ return ret;
}
static void
-huge_dalloc(void *ptr)
+huge_dalloc(void* aPtr)
{
- extent_node_t *node, key;
-
- huge_mtx.Lock();
-
- /* Extract from tree of huge allocations. */
- key.addr = ptr;
- node = huge.Search(&key);
- MOZ_ASSERT(node);
- MOZ_ASSERT(node->addr == ptr);
- huge.Remove(node);
-
- huge_ndalloc++;
- huge_allocated -= node->size;
- huge_mapped -= CHUNK_CEILING(node->size);
-
- huge_mtx.Unlock();
-
- /* Unmap chunk. */
- chunk_dealloc(node->addr, CHUNK_CEILING(node->size), HUGE_CHUNK);
-
- base_node_dealloc(node);
+ extent_node_t* node;
+ {
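+ /* Look up and detach the node while holding huge_mtx; the chunk is
+  * unmapped and the node recycled after the lock is released. */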
+ extent_node_t key;
+ MutexAutoLock lock(huge_mtx);
+
+ /* Extract from tree of huge allocations. */
+ key.addr = aPtr;
+ node = huge.Search(&key);
+ MOZ_ASSERT(node);
+ MOZ_ASSERT(node->addr == aPtr);
+ huge.Remove(node);
+
+ huge_ndalloc++;
+ huge_allocated -= node->size;
+ huge_mapped -= CHUNK_CEILING(node->size);
+ }
+
+ /* Unmap chunk. */
+ chunk_dealloc(node->addr, CHUNK_CEILING(node->size), HUGE_CHUNK);
+
+ base_node_dealloc(node);
}
/*
* FreeBSD's pthreads implementation calls malloc(3), so the malloc
* implementation has to take pains to avoid infinite recursion during
* initialization.
*/
#if defined(XP_WIN)
@@ -4263,27 +4250,24 @@ static
bool
malloc_init_hard(void)
{
unsigned i;
const char *opts;
long result;
#ifndef XP_WIN
- gInitLock.Lock();
+ MutexAutoLock lock(gInitLock);
#endif
if (malloc_initialized) {
/*
* Another thread initialized the allocator before this one
* acquired gInitLock.
*/
-#ifndef XP_WIN
- gInitLock.Unlock();
-#endif
return false;
}
if (!thread_arena.init()) {
return false;
}
/* Get page size and number of CPUs */
@@ -4414,19 +4398,16 @@ MALLOC_OUT:
/*
* Initialize one arena here.
*/
gArenaTree.Init();
arenas_extend();
gMainArena = gArenaTree.First();
if (!gMainArena) {
-#ifndef XP_WIN
- gInitLock.Unlock();
-#endif
return true;
}
/* arena_t::Init() sets this to a lower value for thread local arenas;
* reset to the default value for the main arena. */
gMainArena->mMaxDirty = opt_dirty_max;
/*
* Assign the initial arena to the initial thread.
@@ -4439,19 +4420,16 @@ MALLOC_OUT:
malloc_initialized = true;
#if !defined(XP_WIN) && !defined(XP_DARWIN)
/* Prevent potential deadlock on malloc locks after fork. */
pthread_atfork(_malloc_prefork, _malloc_postfork_parent, _malloc_postfork_child);
#endif
-#ifndef XP_WIN
- gInitLock.Unlock();
-#endif
return false;
}
/*
* End general internal functions.
*/
/******************************************************************************/
/*
@@ -4757,74 +4735,76 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
aStats->waste = 0;
aStats->page_cache = 0;
aStats->bookkeeping = 0;
aStats->bin_unused = 0;
non_arena_mapped = 0;
/* Get huge mapped/allocated. */
- huge_mtx.Lock();
- non_arena_mapped += huge_mapped;
- aStats->allocated += huge_allocated;
- MOZ_ASSERT(huge_mapped >= huge_allocated);
- huge_mtx.Unlock();
+ {
+ MutexAutoLock lock(huge_mtx);
+ non_arena_mapped += huge_mapped;
+ aStats->allocated += huge_allocated;
+ MOZ_ASSERT(huge_mapped >= huge_allocated);
+ }
/* Get base mapped/allocated. */
- base_mtx.Lock();
- non_arena_mapped += base_mapped;
- aStats->bookkeeping += base_committed;
- MOZ_ASSERT(base_mapped >= base_committed);
- base_mtx.Unlock();
+ {
+ MutexAutoLock lock(base_mtx);
+ non_arena_mapped += base_mapped;
+ aStats->bookkeeping += base_committed;
+ MOZ_ASSERT(base_mapped >= base_committed);
+ }
arenas_lock.Lock();
/* Iterate over arenas. */
for (auto arena : gArenaTree.iter()) {
size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
arena_unused, arena_headers;
arena_run_t* run;
if (!arena) {
continue;
}
arena_headers = 0;
arena_unused = 0;
- arena->mLock.Lock();
-
- arena_mapped = arena->mStats.mapped;
-
- /* "committed" counts dirty and allocated memory. */
- arena_committed = arena->mStats.committed << pagesize_2pow;
-
- arena_allocated = arena->mStats.allocated_small +
- arena->mStats.allocated_large;
-
- arena_dirty = arena->mNumDirty << pagesize_2pow;
-
- for (j = 0; j < ntbins + nqbins + nsbins; j++) {
- arena_bin_t* bin = &arena->mBins[j];
- size_t bin_unused = 0;
-
- for (auto mapelm : bin->runs.iter()) {
- run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
- bin_unused += run->nfree * bin->reg_size;
+ {
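+ /* Snapshot this arena's counters while holding its lock; the totals are
+  * folded into aStats after the lock is released. */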
+ MutexAutoLock lock(arena->mLock);
+
+ arena_mapped = arena->mStats.mapped;
+
+ /* "committed" counts dirty and allocated memory. */
+ arena_committed = arena->mStats.committed << pagesize_2pow;
+
+ arena_allocated = arena->mStats.allocated_small +
+ arena->mStats.allocated_large;
+
+ arena_dirty = arena->mNumDirty << pagesize_2pow;
+
+ for (j = 0; j < ntbins + nqbins + nsbins; j++) {
+ arena_bin_t* bin = &arena->mBins[j];
+ size_t bin_unused = 0;
+
+ for (auto mapelm : bin->runs.iter()) {
+ run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
+ bin_unused += run->nfree * bin->reg_size;
+ }
+
+ if (bin->runcur) {
+ bin_unused += bin->runcur->nfree * bin->reg_size;
+ }
+
+ arena_unused += bin_unused;
+ arena_headers += bin->stats.curruns * bin->reg0_offset;
}
-
- if (bin->runcur) {
- bin_unused += bin->runcur->nfree * bin->reg_size;
- }
-
- arena_unused += bin_unused;
- arena_headers += bin->stats.curruns * bin->reg0_offset;
}
- arena->mLock.Unlock();
-
MOZ_ASSERT(arena_mapped >= arena_committed);
MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);
/* "waste" is committed memory that is neither dirty nor
* allocated. */
aStats->mapped += arena_mapped;
aStats->allocated += arena_allocated;
aStats->page_cache += arena_dirty;
@@ -4878,89 +4858,82 @@ hard_purge_chunk(arena_chunk_t *chunk)
i += npages;
}
}
/* Explicitly remove all of this arena's MADV_FREE'd pages from memory. */
void
arena_t::HardPurge()
{
- mLock.Lock();
+ MutexAutoLock lock(mLock);
while (!mChunksMAdvised.isEmpty()) {
arena_chunk_t* chunk = mChunksMAdvised.popFront();
hard_purge_chunk(chunk);
}
-
- mLock.Unlock();
}
template<> inline void
MozJemalloc::jemalloc_purge_freed_pages()
{
- arenas_lock.Lock();
+ MutexAutoLock lock(arenas_lock);
for (auto arena : gArenaTree.iter()) {
arena->HardPurge();
}
- arenas_lock.Unlock();
}
#else /* !defined MALLOC_DOUBLE_PURGE */
template<> inline void
MozJemalloc::jemalloc_purge_freed_pages()
{
/* Do nothing. */
}
#endif /* defined MALLOC_DOUBLE_PURGE */
template<> inline void
MozJemalloc::jemalloc_free_dirty_pages(void)
{
- arenas_lock.Lock();
+ MutexAutoLock lock(arenas_lock);
for (auto arena : gArenaTree.iter()) {
- arena->mLock.Lock();
+ MutexAutoLock arena_lock(arena->mLock);
arena->Purge(true);
- arena->mLock.Unlock();
}
- arenas_lock.Unlock();
}
inline arena_t*
arena_t::GetById(arena_id_t aArenaId)
{
arena_t key;
key.mId = aArenaId;
- arenas_lock.Lock();
+ MutexAutoLock lock(arenas_lock);
arena_t* result = gArenaTree.Search(&key);
- arenas_lock.Unlock();
MOZ_RELEASE_ASSERT(result);
return result;
}
#ifdef NIGHTLY_BUILD
template<> inline arena_id_t
MozJemalloc::moz_create_arena()
{
arena_t* arena = arenas_extend();
return arena->mId;
}
template<> inline void
MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
{
arena_t* arena = arena_t::GetById(aArenaId);
- arenas_lock.Lock();
+ MutexAutoLock lock(arenas_lock);
gArenaTree.Remove(arena);
 // The arena is leaked, and remaining allocations in it are still alive
 // until they are freed. After that, the arena will be empty but will still
 // take up at least a chunk of address space. TODO: bug 1364359.
- arenas_lock.Unlock();
}
#define MALLOC_DECL(name, return_type, ...) \
template<> inline return_type \
MozJemalloc::moz_arena_ ## name(arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
{ \
BaseAllocator allocator(arena_t::GetById(aArenaId)); \
return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \