--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -500,37 +500,44 @@ static size_t recycled_size;
#error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
#endif
/*
* Mutexes based on spinlocks. We can't use normal pthread spinlocks in all
* places, because they require malloc()ed memory, which causes bootstrapping
* issues in some cases.
*/
+struct Mutex
+{
#if defined(XP_WIN)
-#define malloc_mutex_t CRITICAL_SECTION
+ CRITICAL_SECTION mMutex;
#elif defined(XP_DARWIN)
-struct malloc_mutex_t {
- OSSpinLock lock;
+ OSSpinLock mMutex;
+#else
+ pthread_mutex_t mMutex;
+#endif
+
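+ // Initializes the mutex; must be called before Lock()/Unlock() unless the
+ // mutex was statically initialized (see gInitLock below).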
+ inline bool Init();
+
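+ // Acquires the mutex, waiting until it becomes available.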
+ inline void Lock();
+
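+ // Releases the mutex. The calling thread must currently hold it.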
+ inline void Unlock();
};
-#else
-typedef pthread_mutex_t malloc_mutex_t;
-#endif
/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;
#if defined(XP_WIN)
/* No init lock for Windows. */
#elif defined(XP_DARWIN)
-static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
+static Mutex gInitLock = { OS_SPINLOCK_INIT };
#elif defined(XP_LINUX) && !defined(ANDROID)
-static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
+static Mutex gInitLock = { PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP };
#else
-static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
+static Mutex gInitLock = { PTHREAD_MUTEX_INITIALIZER };
#endif
/******************************************************************************/
/*
* Statistics data structures.
*/
struct malloc_bin_stats_t {
@@ -663,17 +670,17 @@ class AddressRadixTree {
#endif
static const size_t kBitsPerLevel = kNodeSize2Pow - SIZEOF_PTR_2POW;
static const size_t kBitsAtLevel1 =
(Bits % kBitsPerLevel) ? Bits % kBitsPerLevel : kBitsPerLevel;
static const size_t kHeight = (Bits + kBitsPerLevel - 1) / kBitsPerLevel;
static_assert(kBitsAtLevel1 + (kHeight - 1) * kBitsPerLevel == Bits,
"AddressRadixTree parameters don't work out");
- malloc_mutex_t mLock;
+ Mutex mLock;
void** mRoot;
public:
bool Init();
inline void* Get(void* aAddr);
// Returns whether the value was properly set
@@ -920,17 +927,17 @@ struct arena_t {
# define ARENA_MAGIC 0x947d3d24
#endif
arena_id_t mId;
/* Linkage for the tree of arenas by id. */
RedBlackTreeNode<arena_t> mLink;
/* All operations on this arena require that lock be locked. */
- malloc_mutex_t mLock;
+ Mutex mLock;
arena_stats_t mStats;
private:
/* Tree of dirty-page-containing chunks this arena manages. */
RedBlackTree<arena_chunk_t, ArenaDirtyChunkTrait> mChunksDirty;
#ifdef MALLOC_DOUBLE_PURGE
@@ -1062,29 +1069,29 @@ struct ArenaTreeTrait
/********/
/*
* Chunks.
*/
static AddressRadixTree<(SIZEOF_PTR << 3) - CHUNK_2POW_DEFAULT> gChunkRTree;
/* Protects chunk-related data structures. */
-static malloc_mutex_t chunks_mtx;
+static Mutex chunks_mtx;
/*
* Trees of chunks that were previously allocated (trees differ only in node
* ordering). These are used when allocating chunks, in an attempt to re-use
* address space. Depending on function, different tree orderings are needed,
* which is why there are two trees with the same contents.
*/
static RedBlackTree<extent_node_t, ExtentTreeSzTrait> chunks_szad_mmap;
static RedBlackTree<extent_node_t, ExtentTreeTrait> chunks_ad_mmap;
/* Protects huge allocation-related data structures. */
-static malloc_mutex_t huge_mtx;
+static Mutex huge_mtx;
/* Tree of chunks that are stand-alone huge allocations. */
static RedBlackTree<extent_node_t, ExtentTreeTrait> huge;
/* Huge allocation statistics. */
static uint64_t huge_nmalloc;
static uint64_t huge_ndalloc;
static size_t huge_allocated;
@@ -1100,31 +1107,31 @@ static size_t huge_mapped;
* pages are carved up in cacheline-size quanta, so that there is no chance of
* false cache line sharing.
*/
static void *base_pages;
static void *base_next_addr;
static void *base_next_decommitted;
static void *base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t *base_nodes;
-static malloc_mutex_t base_mtx;
+static Mutex base_mtx;
static size_t base_mapped;
static size_t base_committed;
/********/
/*
* Arenas.
*/
// A tree of all available arenas, arranged by id.
// TODO: Move into arena_t as a static member when rb_tree doesn't depend on
// the type being defined anymore.
static RedBlackTree<arena_t, ArenaTreeTrait> gArenaTree;
static unsigned narenas;
-static malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
+static Mutex arenas_lock; /* Protects arenas initialization. */
/*
* The arena associated with the current thread (per jemalloc_thread_local_arena)
* On OSX, __thread/thread_local circles back calling malloc to allocate storage
* on first access on each thread, which leads to an infinite loop, but
* pthread-based TLS somehow doesn't have this problem.
*/
#if !defined(XP_DARWIN)
@@ -1237,67 +1244,70 @@ static void
extern "C" MOZ_EXPORT
int pthread_atfork(void (*)(void), void (*)(void), void(*)(void));
#endif
/******************************************************************************/
/*
* Begin mutex. We can't use normal pthread mutexes in all places, because
* they require malloc()ed memory, which causes bootstrapping issues in some
- * cases.
+ * cases. We also can't use constructors, because for static mutexes they
+ * would run after the first use of malloc, resetting the locks.
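+ * Statically-declared mutexes (like gInitLock above) therefore rely on
+ * brace initialization with the platform's native static initializer.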
*/
-static bool
-malloc_mutex_init(malloc_mutex_t *mutex)
+// Initializes a mutex. Returns whether initialization succeeded.
+bool
+Mutex::Init()
{
#if defined(XP_WIN)
- if (!InitializeCriticalSectionAndSpinCount(mutex, _CRT_SPINCOUNT))
- return (true);
+ if (!InitializeCriticalSectionAndSpinCount(&mMutex, _CRT_SPINCOUNT)) {
+ return false;
+ }
#elif defined(XP_DARWIN)
- mutex->lock = OS_SPINLOCK_INIT;
+ mMutex = OS_SPINLOCK_INIT;
#elif defined(XP_LINUX) && !defined(ANDROID)
- pthread_mutexattr_t attr;
- if (pthread_mutexattr_init(&attr) != 0)
- return (true);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
- if (pthread_mutex_init(mutex, &attr) != 0) {
- pthread_mutexattr_destroy(&attr);
- return (true);
- }
- pthread_mutexattr_destroy(&attr);
+ pthread_mutexattr_t attr;
+ if (pthread_mutexattr_init(&attr) != 0) {
+ return false;
+ }
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+ if (pthread_mutex_init(&mMutex, &attr) != 0) {
+ pthread_mutexattr_destroy(&attr);
+ return false;
+ }
+ pthread_mutexattr_destroy(&attr);
#else
- if (pthread_mutex_init(mutex, nullptr) != 0)
- return (true);
+ if (pthread_mutex_init(&mMutex, nullptr) != 0) {
+ return false;
+ }
#endif
- return (false);
+ return true;
}
-static inline void
-malloc_mutex_lock(malloc_mutex_t *mutex)
+void
+Mutex::Lock()
{
-
#if defined(XP_WIN)
- EnterCriticalSection(mutex);
+ EnterCriticalSection(&mMutex);
#elif defined(XP_DARWIN)
- OSSpinLockLock(&mutex->lock);
+ OSSpinLockLock(&mMutex);
#else
- pthread_mutex_lock(mutex);
+ pthread_mutex_lock(&mMutex);
#endif
}
-static inline void
-malloc_mutex_unlock(malloc_mutex_t *mutex)
+void
+Mutex::Unlock()
{
-
#if defined(XP_WIN)
- LeaveCriticalSection(mutex);
+ LeaveCriticalSection(&mMutex);
#elif defined(XP_DARWIN)
- OSSpinLockUnlock(&mutex->lock);
+ OSSpinLockUnlock(&mMutex);
#else
- pthread_mutex_unlock(mutex);
+ pthread_mutex_unlock(&mMutex);
#endif
}
/*
* End mutex.
*/
/******************************************************************************/
/*
@@ -1444,21 +1454,21 @@ static void *
base_alloc(size_t size)
{
void *ret;
size_t csize;
/* Round size up to nearest multiple of the cacheline size. */
csize = CACHELINE_CEILING(size);
- malloc_mutex_lock(&base_mtx);
+ base_mtx.Lock();
/* Make sure there's enough space for the allocation. */
if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
if (base_pages_alloc(csize)) {
- malloc_mutex_unlock(&base_mtx);
+ base_mtx.Unlock();
return nullptr;
}
}
/* Allocate. */
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
/* Make sure enough pages are committed for the new allocation. */
if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
@@ -1468,17 +1478,17 @@ base_alloc(size_t size)
# ifdef MALLOC_DECOMMIT
pages_commit(base_next_decommitted, (uintptr_t)pbase_next_addr -
(uintptr_t)base_next_decommitted);
# endif
base_next_decommitted = pbase_next_addr;
base_committed += (uintptr_t)pbase_next_addr -
(uintptr_t)base_next_decommitted;
}
- malloc_mutex_unlock(&base_mtx);
+ base_mtx.Unlock();
return (ret);
}
static void *
base_calloc(size_t number, size_t size)
{
void *ret;
@@ -1489,37 +1499,37 @@ base_calloc(size_t number, size_t size)
return (ret);
}
static extent_node_t *
base_node_alloc(void)
{
extent_node_t *ret;
- malloc_mutex_lock(&base_mtx);
+ base_mtx.Lock();
if (base_nodes) {
ret = base_nodes;
base_nodes = *(extent_node_t **)ret;
- malloc_mutex_unlock(&base_mtx);
+ base_mtx.Unlock();
} else {
- malloc_mutex_unlock(&base_mtx);
+ base_mtx.Unlock();
ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
}
return (ret);
}
static void
base_node_dealloc(extent_node_t *node)
{
- malloc_mutex_lock(&base_mtx);
+ base_mtx.Lock();
*(extent_node_t **)node = base_nodes;
base_nodes = node;
- malloc_mutex_unlock(&base_mtx);
+ base_mtx.Unlock();
}
/*
* End Utility functions/macros.
*/
/******************************************************************************/
/*
* Begin chunk management functions.
@@ -1674,18 +1684,17 @@ pages_copy(void *dest, const void *src,
(vm_address_t)dest);
}
#endif
template <size_t Bits>
bool
AddressRadixTree<Bits>::Init()
{
- malloc_mutex_init(&mLock);
-
+ mLock.Init();
mRoot = (void**)base_calloc(1 << kBitsAtLevel1, sizeof(void*));
return mRoot;
}
template <size_t Bits>
void**
AddressRadixTree<Bits>::GetSlot(void* aKey, bool aCreate)
{
@@ -1728,52 +1737,52 @@ AddressRadixTree<Bits>::Get(void* aKey)
void* ret = nullptr;
void** slot = GetSlot(aKey);
if (slot) {
ret = *slot;
}
#ifdef MOZ_DEBUG
- malloc_mutex_lock(&mlock);
+ mLock.Lock();
/*
* Suppose that it were possible for a jemalloc-allocated chunk to be
* munmap()ped, followed by a different allocator in another thread re-using
* overlapping virtual memory, all without invalidating the cached rtree
* value. The result would be a false positive (the rtree would claim that
* jemalloc owns memory that it had actually discarded). I don't think this
* scenario is possible, but the following assertion is a prudent sanity
* check.
*/
if (!slot) {
// In case a slot has been created in the meantime.
slot = GetSlot(aKey);
}
if (slot) {
- // The malloc_mutex_lock call above should act as a memory barrier, forcing
+ // The Lock() call above should act as a memory barrier, forcing
// the compiler to emit a new read instruction for *slot.
MOZ_ASSERT(ret == *slot);
} else {
MOZ_ASSERT(ret == nullptr);
}
- malloc_mutex_unlock(&mlock);
+ mLock.Unlock();
#endif
return ret;
}
template <size_t Bits>
bool
AddressRadixTree<Bits>::Set(void* aKey, void* aValue)
{
- malloc_mutex_lock(&mLock);
+ mLock.Lock();
void** slot = GetSlot(aKey, /* create */ true);
if (slot) {
*slot = aValue;
}
- malloc_mutex_unlock(&mLock);
+ mLock.Unlock();
return slot;
}
/* pages_trim, chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked
* from upstream jemalloc 3.4.1 to fix Mozilla bug 956501. */
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
@@ -1941,20 +1950,20 @@ chunk_recycle(RedBlackTree<extent_node_t
}
alloc_size = size + alignment - chunksize;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return nullptr;
key.addr = nullptr;
key.size = alloc_size;
- malloc_mutex_lock(&chunks_mtx);
+ chunks_mtx.Lock();
node = chunks_szad->SearchOrNext(&key);
if (!node) {
- malloc_mutex_unlock(&chunks_mtx);
+ chunks_mtx.Unlock();
return nullptr;
}
leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
(uintptr_t)node->addr;
MOZ_ASSERT(node->size >= leadsize + size);
trailsize = node->size - leadsize - size;
ret = (void *)((uintptr_t)node->addr + leadsize);
chunk_type = node->chunk_type;
@@ -1976,35 +1985,35 @@ chunk_recycle(RedBlackTree<extent_node_t
if (!node) {
/*
* An additional node is required, but
* base_node_alloc() can cause a new base chunk to be
* allocated. Drop chunks_mtx in order to avoid
* deadlock, and if node allocation fails, deallocate
* the result before returning an error.
*/
- malloc_mutex_unlock(&chunks_mtx);
+ chunks_mtx.Unlock();
node = base_node_alloc();
if (!node) {
chunk_dealloc(ret, size, chunk_type);
return nullptr;
}
- malloc_mutex_lock(&chunks_mtx);
+ chunks_mtx.Lock();
}
node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize;
node->chunk_type = chunk_type;
chunks_szad->Insert(node);
chunks_ad->Insert(node);
node = nullptr;
}
recycled_size -= size;
- malloc_mutex_unlock(&chunks_mtx);
+ chunks_mtx.Unlock();
if (node)
base_node_dealloc(node);
#ifdef MALLOC_DECOMMIT
pages_commit(ret, size);
// pages_commit is guaranteed to zero the chunk.
if (zeroed) {
*zeroed = true;
@@ -2104,17 +2113,17 @@ chunk_record(RedBlackTree<extent_node_t,
* be needed, because base_node_alloc() may cause a new base chunk to
* be allocated, which could cause deadlock if chunks_mtx were already
* held.
*/
xnode = base_node_alloc();
/* Use xprev to implement conditional deferred deallocation of prev. */
xprev = nullptr;
- malloc_mutex_lock(&chunks_mtx);
+ chunks_mtx.Lock();
key.addr = (void *)((uintptr_t)chunk + size);
node = chunks_ad->SearchOrNext(&key);
/* Try to coalesce forward. */
if (node && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
@@ -2167,17 +2176,17 @@ chunk_record(RedBlackTree<extent_node_t,
chunks_szad->Insert(node);
xprev = prev;
}
recycled_size += size;
label_return:
- malloc_mutex_unlock(&chunks_mtx);
+ chunks_mtx.Unlock();
/*
* Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
* avoid potential deadlock.
*/
if (xnode)
base_node_dealloc(xnode);
if (xprev)
base_node_dealloc(xprev);
@@ -3115,30 +3124,30 @@ arena_t::MallocSmall(size_t aSize, bool
} else {
/* Sub-page. */
aSize = pow2_ceil(aSize);
bin = &mBins[ntbins + nqbins
+ (ffs((int)(aSize >> SMALL_MAX_2POW_DEFAULT)) - 2)];
}
MOZ_DIAGNOSTIC_ASSERT(aSize == bin->reg_size);
- malloc_mutex_lock(&mLock);
+ mLock.Lock();
if ((run = bin->runcur) && run->nfree > 0) {
ret = MallocBinEasy(bin, run);
} else {
ret = MallocBinHard(bin);
}
if (!ret) {
- malloc_mutex_unlock(&mLock);
+ mLock.Unlock();
return nullptr;
}
mStats.allocated_small += aSize;
- malloc_mutex_unlock(&mLock);
+ mLock.Unlock();
if (aZero == false) {
if (opt_junk) {
memset(ret, kAllocJunk, aSize);
} else if (opt_zero) {
memset(ret, 0, aSize);
}
} else
@@ -3149,24 +3158,24 @@ arena_t::MallocSmall(size_t aSize, bool
void*
arena_t::MallocLarge(size_t aSize, bool aZero)
{
void* ret;
/* Large allocation. */
aSize = PAGE_CEILING(aSize);
- malloc_mutex_lock(&mLock);
+ mLock.Lock();
ret = AllocRun(nullptr, aSize, true, aZero);
if (!ret) {
- malloc_mutex_unlock(&mLock);
+ mLock.Unlock();
return nullptr;
}
mStats.allocated_large += aSize;
- malloc_mutex_unlock(&mLock);
+ mLock.Unlock();
if (aZero == false) {
if (opt_junk) {
memset(ret, kAllocJunk, aSize);
} else if (opt_zero) {
memset(ret, 0, aSize);
}
}
@@ -3203,20 +3212,20 @@ arena_t::Palloc(size_t aAlignment, size_
{
void* ret;
size_t offset;
arena_chunk_t* chunk;
MOZ_ASSERT((aSize & pagesize_mask) == 0);
MOZ_ASSERT((aAlignment & pagesize_mask) == 0);
- malloc_mutex_lock(&mLock);
+ mLock.Lock();
ret = AllocRun(nullptr, aAllocSize, true, false);
if (!ret) {
- malloc_mutex_unlock(&mLock);
+ mLock.Unlock();
return nullptr;
}
chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(ret);
offset = uintptr_t(ret) & (aAlignment - 1);
MOZ_ASSERT((offset & pagesize_mask) == 0);
MOZ_ASSERT(offset < aAllocSize);
@@ -3235,17 +3244,17 @@ arena_t::Palloc(size_t aAlignment, size_
if (trailsize != 0) {
/* Trim trailing space. */
MOZ_ASSERT(trailsize < aAllocSize);
TrimRunTail(chunk, (arena_run_t*)ret, aSize + trailsize, aSize, false);
}
}
mStats.allocated_large += aSize;
- malloc_mutex_unlock(&mLock);
+ mLock.Unlock();
if (opt_junk) {
memset(ret, kAllocJunk, aSize);
} else if (opt_zero) {
memset(ret, 0, aSize);
}
return ret;
}
@@ -3403,23 +3412,23 @@ isalloc_validate(const void* ptr)
return arena_salloc(ptr);
} else {
size_t ret;
extent_node_t* node;
extent_node_t key;
/* Chunk. */
key.addr = (void*)chunk;
- malloc_mutex_lock(&huge_mtx);
+ huge_mtx.Lock();
node = huge.Search(&key);
if (node)
ret = node->size;
else
ret = 0;
- malloc_mutex_unlock(&huge_mtx);
+ huge_mtx.Unlock();
return ret;
}
}
static inline size_t
isalloc(const void *ptr)
{
size_t ret;
@@ -3433,26 +3442,26 @@ isalloc(const void *ptr)
MOZ_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
ret = arena_salloc(ptr);
} else {
extent_node_t *node, key;
/* Chunk (huge allocation). */
- malloc_mutex_lock(&huge_mtx);
+ huge_mtx.Lock();
/* Extract from tree of huge allocations. */
key.addr = const_cast<void*>(ptr);
node = huge.Search(&key);
MOZ_DIAGNOSTIC_ASSERT(node);
ret = node->size;
- malloc_mutex_unlock(&huge_mtx);
+ huge_mtx.Unlock();
}
return (ret);
}
template<> inline void
MozJemalloc::jemalloc_ptr_info(const void* aPtr, jemalloc_ptr_info_t* aInfo)
{
@@ -3464,24 +3473,24 @@ MozJemalloc::jemalloc_ptr_info(const voi
return;
}
// Look for huge allocations before looking for |chunk| in gChunkRTree.
// This is necessary because |chunk| won't be in gChunkRTree if it's
// the second or subsequent chunk in a huge allocation.
extent_node_t* node;
extent_node_t key;
- malloc_mutex_lock(&huge_mtx);
+ huge_mtx.Lock();
key.addr = const_cast<void*>(aPtr);
node = reinterpret_cast<
RedBlackTree<extent_node_t, ExtentTreeBoundsTrait>*>(&huge)->Search(&key);
if (node) {
*aInfo = { TagLiveHuge, node->addr, node->size };
}
- malloc_mutex_unlock(&huge_mtx);
+ huge_mtx.Unlock();
if (node) {
return;
}
// It's not a huge allocation. Check if we have a known chunk.
if (!gChunkRTree.Get(chunk)) {
*aInfo = { TagUnknown, nullptr, 0 };
return;
@@ -3676,28 +3685,28 @@ arena_dalloc(void *ptr, size_t offset)
MOZ_ASSERT(offset != 0);
MOZ_ASSERT(CHUNK_ADDR2OFFSET(ptr) == offset);
chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
arena = chunk->arena;
MOZ_ASSERT(arena);
MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
- malloc_mutex_lock(&arena->mLock);
+ arena->mLock.Lock();
pageind = offset >> pagesize_2pow;
mapelm = &chunk->map[pageind];
MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
/* Small allocation. */
arena->DallocSmall(chunk, ptr, mapelm);
} else {
/* Large allocation. */
arena->DallocLarge(chunk, ptr);
}
- malloc_mutex_unlock(&arena->mLock);
+ arena->mLock.Unlock();
}
static inline void
idalloc(void *ptr)
{
size_t offset;
MOZ_ASSERT(ptr);
@@ -3714,30 +3723,30 @@ arena_t::RallocShrinkLarge(arena_chunk_t
size_t aOldSize)
{
MOZ_ASSERT(aSize < aOldSize);
/*
* Shrink the run, and make trailing pages available for other
* allocations.
*/
- malloc_mutex_lock(&mLock);
+ mLock.Lock();
TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
mStats.allocated_large -= aOldSize - aSize;
- malloc_mutex_unlock(&mLock);
+ mLock.Unlock();
}
bool
arena_t::RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
size_t aOldSize)
{
size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
size_t npages = aOldSize >> pagesize_2pow;
- malloc_mutex_lock(&mLock);
+ mLock.Lock();
MOZ_DIAGNOSTIC_ASSERT(aOldSize == (aChunk->map[pageind].bits & ~pagesize_mask));
/* Try to extend the run. */
MOZ_ASSERT(aSize > aOldSize);
if (pageind + npages < chunk_npages && (aChunk->map[pageind+npages].bits
& CHUNK_MAP_ALLOCATED) == 0 && (aChunk->map[pageind+npages].bits &
~pagesize_mask) >= aSize - aOldSize) {
/*
@@ -3750,20 +3759,20 @@ arena_t::RallocGrowLarge(arena_chunk_t*
false);
aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
aChunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
mStats.allocated_large += aSize - aOldSize;
- malloc_mutex_unlock(&mLock);
+ mLock.Unlock();
return false;
}
- malloc_mutex_unlock(&mLock);
+ mLock.Unlock();
return true;
}
/*
* Try to resize a large allocation, in order to avoid copying. This will
* always fail if growing an object, and the following run is already in use.
*/
@@ -3884,17 +3893,17 @@ iralloc(void* aPtr, size_t aSize, arena_
bool
arena_t::Init()
{
unsigned i;
arena_bin_t* bin;
size_t prev_run_size;
- if (malloc_mutex_init(&mLock))
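+ // Bail out if the arena lock cannot be initialized.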
+ if (!mLock.Init())
return true;
memset(&mLink, 0, sizeof(mLink));
memset(&mStats, 0, sizeof(arena_stats_t));
/* Initialize chunks. */
mChunksDirty.Init();
#ifdef MALLOC_DOUBLE_PURGE
@@ -3983,23 +3992,23 @@ arenas_extend()
/* Allocate enough space for trailing bins. */
ret = (arena_t *)base_alloc(sizeof(arena_t)
+ (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
if (!ret || ret->Init()) {
return arenas_fallback();
}
- malloc_mutex_lock(&arenas_lock);
+ arenas_lock.Lock();
// TODO: Use random Ids.
ret->mId = narenas++;
gArenaTree.Insert(ret);
- malloc_mutex_unlock(&arenas_lock);
+ arenas_lock.Unlock();
return ret;
}
/*
* End arena.
*/
/******************************************************************************/
/*
@@ -4043,17 +4052,17 @@ huge_palloc(size_t size, size_t alignmen
chunk_ensure_zero(ret, csize, zeroed);
}
/* Insert node into huge. */
node->addr = ret;
psize = PAGE_CEILING(size);
node->size = psize;
- malloc_mutex_lock(&huge_mtx);
+ huge_mtx.Lock();
huge.Insert(node);
huge_nmalloc++;
/* Although we allocated space for csize bytes, we indicate that we've
* allocated only psize bytes.
*
* If DECOMMIT is defined, this is a reasonable thing to do, since
* we'll explicitly decommit the bytes in excess of psize.
@@ -4067,17 +4076,17 @@ huge_palloc(size_t size, size_t alignmen
* requested if it first calls malloc_usable_size and finds out how
* much space it has to play with. But because we set node->size =
* psize above, malloc_usable_size will return psize, not csize, and
* the program will (hopefully) never touch bytes in excess of psize.
* Thus those bytes won't take up space in physical memory, and we can
* reasonably claim we never "allocated" them in the first place. */
huge_allocated += psize;
huge_mapped += csize;
- malloc_mutex_unlock(&huge_mtx);
+ huge_mtx.Unlock();
#ifdef MALLOC_DECOMMIT
if (csize - psize > 0)
pages_decommit((void *)((uintptr_t)ret + psize), csize - psize);
#endif
if (zero == false) {
if (opt_junk)
@@ -4115,51 +4124,51 @@ huge_ralloc(void *ptr, size_t size, size
#ifdef MALLOC_DECOMMIT
if (psize < oldsize) {
extent_node_t *node, key;
pages_decommit((void *)((uintptr_t)ptr + psize),
oldsize - psize);
/* Update recorded size. */
- malloc_mutex_lock(&huge_mtx);
+ huge_mtx.Lock();
key.addr = const_cast<void*>(ptr);
node = huge.Search(&key);
MOZ_ASSERT(node);
MOZ_ASSERT(node->size == oldsize);
huge_allocated -= oldsize - psize;
/* No need to change huge_mapped, because we didn't
* (un)map anything. */
node->size = psize;
- malloc_mutex_unlock(&huge_mtx);
+ huge_mtx.Unlock();
} else if (psize > oldsize) {
pages_commit((void *)((uintptr_t)ptr + oldsize),
psize - oldsize);
}
#endif
/* Although we don't have to commit or decommit anything if
* DECOMMIT is not defined and the size class didn't change, we
* do need to update the recorded size if the size increased,
* so malloc_usable_size doesn't return a value smaller than
* what was requested via realloc(). */
if (psize > oldsize) {
/* Update recorded size. */
extent_node_t *node, key;
- malloc_mutex_lock(&huge_mtx);
+ huge_mtx.Lock();
key.addr = const_cast<void*>(ptr);
node = huge.Search(&key);
MOZ_ASSERT(node);
MOZ_ASSERT(node->size == oldsize);
huge_allocated += psize - oldsize;
/* No need to change huge_mapped, because we didn't
* (un)map anything. */
node->size = psize;
- malloc_mutex_unlock(&huge_mtx);
+ huge_mtx.Unlock();
}
if (opt_zero && size > oldsize) {
memset((void *)((uintptr_t)ptr + oldsize), 0, size
- oldsize);
}
return (ptr);
}
@@ -4184,30 +4193,30 @@ huge_ralloc(void *ptr, size_t size, size
return (ret);
}
static void
huge_dalloc(void *ptr)
{
extent_node_t *node, key;
- malloc_mutex_lock(&huge_mtx);
+ huge_mtx.Lock();
/* Extract from tree of huge allocations. */
key.addr = ptr;
node = huge.Search(&key);
MOZ_ASSERT(node);
MOZ_ASSERT(node->addr == ptr);
huge.Remove(node);
huge_ndalloc++;
huge_allocated -= node->size;
huge_mapped -= CHUNK_CEILING(node->size);
- malloc_mutex_unlock(&huge_mtx);
+ huge_mtx.Unlock();
/* Unmap chunk. */
chunk_dealloc(node->addr, CHUNK_CEILING(node->size), HUGE_CHUNK);
base_node_dealloc(node);
}
/*
@@ -4252,26 +4261,26 @@ static
bool
malloc_init_hard(void)
{
unsigned i;
const char *opts;
long result;
#ifndef XP_WIN
- malloc_mutex_lock(&init_lock);
+ gInitLock.Lock();
#endif
if (malloc_initialized) {
/*
* Another thread initialized the allocator before this one
- * acquired init_lock.
+ * acquired gInitLock.
*/
#ifndef XP_WIN
- malloc_mutex_unlock(&init_lock);
+ gInitLock.Unlock();
#endif
return false;
}
if (!thread_arena.init()) {
return false;
}
@@ -4376,45 +4385,45 @@ MALLOC_OUT:
/* Various sanity checks that regard configuration. */
MOZ_ASSERT(quantum >= sizeof(void *));
MOZ_ASSERT(quantum <= pagesize);
MOZ_ASSERT(chunksize >= pagesize);
MOZ_ASSERT(quantum * 4 <= chunksize);
/* Initialize chunks data. */
- malloc_mutex_init(&chunks_mtx);
+ chunks_mtx.Init();
chunks_szad_mmap.Init();
chunks_ad_mmap.Init();
/* Initialize huge allocation data. */
- malloc_mutex_init(&huge_mtx);
+ huge_mtx.Init();
huge.Init();
huge_nmalloc = 0;
huge_ndalloc = 0;
huge_allocated = 0;
huge_mapped = 0;
/* Initialize base allocation data structures. */
base_mapped = 0;
base_committed = 0;
base_nodes = nullptr;
- malloc_mutex_init(&base_mtx);
-
- malloc_mutex_init(&arenas_lock);
+ base_mtx.Init();
+
+ arenas_lock.Init();
/*
* Initialize one arena here.
*/
gArenaTree.Init();
arenas_extend();
gMainArena = gArenaTree.First();
if (!gMainArena) {
#ifndef XP_WIN
- malloc_mutex_unlock(&init_lock);
+ gInitLock.Unlock();
#endif
return true;
}
/* arena_t::Init() sets this to a lower value for thread local arenas;
* reset to the default value for the main arena. */
gMainArena->mMaxDirty = opt_dirty_max;
/*
@@ -4429,17 +4438,17 @@ MALLOC_OUT:
malloc_initialized = true;
#if !defined(XP_WIN) && !defined(XP_DARWIN)
/* Prevent potential deadlock on malloc locks after fork. */
pthread_atfork(_malloc_prefork, _malloc_postfork_parent, _malloc_postfork_child);
#endif
#ifndef XP_WIN
- malloc_mutex_unlock(&init_lock);
+ gInitLock.Unlock();
#endif
return false;
}
/*
* End general internal functions.
*/
/******************************************************************************/
@@ -4746,44 +4755,44 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
aStats->waste = 0;
aStats->page_cache = 0;
aStats->bookkeeping = 0;
aStats->bin_unused = 0;
non_arena_mapped = 0;
/* Get huge mapped/allocated. */
- malloc_mutex_lock(&huge_mtx);
+ huge_mtx.Lock();
non_arena_mapped += huge_mapped;
aStats->allocated += huge_allocated;
MOZ_ASSERT(huge_mapped >= huge_allocated);
- malloc_mutex_unlock(&huge_mtx);
+ huge_mtx.Unlock();
/* Get base mapped/allocated. */
- malloc_mutex_lock(&base_mtx);
+ base_mtx.Lock();
non_arena_mapped += base_mapped;
aStats->bookkeeping += base_committed;
MOZ_ASSERT(base_mapped >= base_committed);
- malloc_mutex_unlock(&base_mtx);
-
- malloc_mutex_lock(&arenas_lock);
+ base_mtx.Unlock();
+
+ arenas_lock.Lock();
/* Iterate over arenas. */
for (auto arena : gArenaTree.iter()) {
size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
arena_unused, arena_headers;
arena_run_t* run;
if (!arena) {
continue;
}
arena_headers = 0;
arena_unused = 0;
- malloc_mutex_lock(&arena->mLock);
+ arena->mLock.Lock();
arena_mapped = arena->mStats.mapped;
/* "committed" counts dirty and allocated memory. */
arena_committed = arena->mStats.committed << pagesize_2pow;
arena_allocated = arena->mStats.allocated_small +
arena->mStats.allocated_large;
@@ -4802,32 +4811,32 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
if (bin->runcur) {
bin_unused += bin->runcur->nfree * bin->reg_size;
}
arena_unused += bin_unused;
arena_headers += bin->stats.curruns * bin->reg0_offset;
}
- malloc_mutex_unlock(&arena->mLock);
+ arena->mLock.Unlock();
MOZ_ASSERT(arena_mapped >= arena_committed);
MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);
/* "waste" is committed memory that is neither dirty nor
* allocated. */
aStats->mapped += arena_mapped;
aStats->allocated += arena_allocated;
aStats->page_cache += arena_dirty;
aStats->waste += arena_committed -
arena_allocated - arena_dirty - arena_unused - arena_headers;
aStats->bin_unused += arena_unused;
aStats->bookkeeping += arena_headers;
}
- malloc_mutex_unlock(&arenas_lock);
+ arenas_lock.Unlock();
/* Account for arena chunk headers in bookkeeping rather than waste. */
chunk_header_size =
((aStats->mapped / aStats->chunksize) * arena_chunk_header_npages) <<
pagesize_2pow;
aStats->mapped += non_arena_mapped;
aStats->bookkeeping += chunk_header_size;
@@ -4867,89 +4876,89 @@ hard_purge_chunk(arena_chunk_t *chunk)
i += npages;
}
}
/* Explicitly remove all of this arena's MADV_FREE'd pages from memory. */
void
arena_t::HardPurge()
{
- malloc_mutex_lock(&mLock);
+ mLock.Lock();
while (!mChunksMAdvised.isEmpty()) {
arena_chunk_t* chunk = mChunksMAdvised.popFront();
hard_purge_chunk(chunk);
}
- malloc_mutex_unlock(&mLock);
+ mLock.Unlock();
}
template<> inline void
MozJemalloc::jemalloc_purge_freed_pages()
{
- malloc_mutex_lock(&arenas_lock);
+ arenas_lock.Lock();
for (auto arena : gArenaTree.iter()) {
arena->HardPurge();
}
- malloc_mutex_unlock(&arenas_lock);
+ arenas_lock.Unlock();
}
#else /* !defined MALLOC_DOUBLE_PURGE */
template<> inline void
MozJemalloc::jemalloc_purge_freed_pages()
{
/* Do nothing. */
}
#endif /* defined MALLOC_DOUBLE_PURGE */
template<> inline void
MozJemalloc::jemalloc_free_dirty_pages(void)
{
- malloc_mutex_lock(&arenas_lock);
+ arenas_lock.Lock();
for (auto arena : gArenaTree.iter()) {
- malloc_mutex_lock(&arena->mLock);
+ arena->mLock.Lock();
arena->Purge(true);
- malloc_mutex_unlock(&arena->mLock);
+ arena->mLock.Unlock();
}
- malloc_mutex_unlock(&arenas_lock);
+ arenas_lock.Unlock();
}
inline arena_t*
arena_t::GetById(arena_id_t aArenaId)
{
arena_t key;
key.mId = aArenaId;
- malloc_mutex_lock(&arenas_lock);
+ arenas_lock.Lock();
arena_t* result = gArenaTree.Search(&key);
- malloc_mutex_unlock(&arenas_lock);
+ arenas_lock.Unlock();
MOZ_RELEASE_ASSERT(result);
return result;
}
#ifdef NIGHTLY_BUILD
template<> inline arena_id_t
MozJemalloc::moz_create_arena()
{
arena_t* arena = arenas_extend();
return arena->mId;
}
template<> inline void
MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
{
arena_t* arena = arena_t::GetById(aArenaId);
- malloc_mutex_lock(&arenas_lock);
+ arenas_lock.Lock();
gArenaTree.Remove(arena);
// The arena is leaked, and remaining allocations in it are still alive
// until they are freed. After that, the arena will be empty but still
// have at least one chunk taking up address space. TODO: bug 1364359.
- malloc_mutex_unlock(&arenas_lock);
+ arenas_lock.Unlock();
}
#define MALLOC_DECL(name, return_type, ...) \
template<> inline return_type \
MozJemalloc::moz_arena_ ## name(arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
{ \
BaseAllocator allocator(arena_t::GetById(aArenaId)); \
return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
@@ -4984,61 +4993,61 @@ MozJemalloc::moz_dispose_arena(arena_id_
#ifndef XP_DARWIN
static
#endif
void
_malloc_prefork(void)
{
/* Acquire all mutexes in a safe order. */
- malloc_mutex_lock(&arenas_lock);
+ arenas_lock.Lock();
for (auto arena : gArenaTree.iter()) {
- malloc_mutex_lock(&arena->mLock);
+ arena->mLock.Lock();
}
- malloc_mutex_lock(&base_mtx);
-
- malloc_mutex_lock(&huge_mtx);
+ base_mtx.Lock();
+
+ huge_mtx.Lock();
}
#ifndef XP_DARWIN
static
#endif
void
_malloc_postfork_parent(void)
{
/* Release all mutexes, now that fork() has completed. */
- malloc_mutex_unlock(&huge_mtx);
-
- malloc_mutex_unlock(&base_mtx);
+ huge_mtx.Unlock();
+
+ base_mtx.Unlock();
for (auto arena : gArenaTree.iter()) {
- malloc_mutex_unlock(&arena->mLock);
+ arena->mLock.Unlock();
}
- malloc_mutex_unlock(&arenas_lock);
+ arenas_lock.Unlock();
}
#ifndef XP_DARWIN
static
#endif
void
_malloc_postfork_child(void)
{
/* Reinitialize all mutexes, now that fork() has completed. */
- malloc_mutex_init(&huge_mtx);
-
- malloc_mutex_init(&base_mtx);
+ huge_mtx.Init();
+
+ base_mtx.Init();
for (auto arena : gArenaTree.iter()) {
- malloc_mutex_init(&arena->mLock);
+ arena->mLock.Init();
}
- malloc_mutex_init(&arenas_lock);
+ arenas_lock.Init();
}
/*
* End library-private functions.
*/
/******************************************************************************/
#ifdef MOZ_REPLACE_MALLOC