--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -297,16 +297,18 @@ static inline void*
#endif
// Size and alignment of memory chunks that are allocated by the OS's virtual
// memory system.
#define CHUNK_2POW_DEFAULT 20
// Maximum number of dirty pages per arena.
#define DIRTY_MAX_DEFAULT (1U << 8)
+static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
+
// Maximum size of L1 cache line. This is used to avoid cache line aliasing,
// so over-estimates are okay (up to a point), but under-estimates will
// negatively affect performance.
#define CACHELINE_2POW 6
#define CACHELINE ((size_t)(1U << CACHELINE_2POW))
// Smallest size class to support. On Windows the smallest allocation size
// must be 8 bytes on 32-bit, 16 bytes on 64-bit. On Linux and Mac, even
@@ -952,18 +954,16 @@ public:
// --------+------+
// 35 | 1024 |
// 36 | 2048 |
// --------+------+
arena_bin_t mBins[1]; // Dynamically sized.
arena_t();
- static inline arena_t* GetById(arena_id_t aArenaId);
-
private:
void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
void DeallocChunk(arena_chunk_t* aChunk);
arena_run_t* AllocRun(arena_bin_t* aBin,
size_t aSize,
bool aLarge,
@@ -1048,16 +1048,61 @@ struct ArenaTreeTrait
static inline int Compare(arena_t* aNode, arena_t* aOther)
{
MOZ_ASSERT(aNode);
MOZ_ASSERT(aOther);
return (aNode->mId > aOther->mId) - (aNode->mId < aOther->mId);
}
};
+// Bookkeeping for all the arenas used by the allocator.
+class ArenaCollection
+{
+public:
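+  // Creates the default arena and initializes the collection; returns false
+  // on failure (e.g. OOM while creating the default arena).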
+ bool Init()
+ {
+ mArenas.Init();
+ mDefaultArena = mLock.Init() ? CreateArena() : nullptr;
+ if (mDefaultArena) {
+      // The arena_t constructor sets this to a lower value for thread-local
+      // arenas; reset it to the default value for the main arena.
+ mDefaultArena->mMaxDirty = opt_dirty_max;
+ }
+ return bool(mDefaultArena);
+ }
+
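+  // Returns the arena with the given id, or nullptr when the allocator is
+  // not initialized; release-asserts that the id matches a known arena.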
+ inline arena_t* GetById(arena_id_t aArenaId);
+
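+  // Creates a new arena and registers it in the collection. On OOM, logs an
+  // error and falls back to returning the default arena.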
+ arena_t* CreateArena();
+
+ void DisposeArena(arena_t* aArena)
+ {
+ MutexAutoLock lock(mLock);
+ mArenas.Remove(aArena);
+    // The arena is leaked, and any remaining allocations in it are still
+    // alive until they are freed. After that, the arena will be empty but
+    // will still have at least one chunk taking up address space.
+    // TODO: bug 1364359.
+ }
+
+ using Iterator = RedBlackTree<arena_t, ArenaTreeTrait>::Iterator;
+
+ Iterator iter() { return mArenas.iter(); }
+
+ inline arena_t* GetDefault() { return mDefaultArena; }
+
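+  // Public, so that callers (stats reporting, fork handlers) can lock the
+  // whole collection while iterating over it.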
+ Mutex mLock;
+
+private:
+ arena_t* mDefaultArena;
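+  // Source of sequential arena ids (see the TODO about random ids in
+  // CreateArena).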
+ arena_id_t mLastArenaId;
+ RedBlackTree<arena_t, ArenaTreeTrait> mArenas;
+};
+
+static ArenaCollection gArenas;
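+// Illustrative use of the collection (hypothetical caller, not part of this
+// patch):
+//   arena_t* arena = gArenas.CreateArena();      // never returns nullptr
+//   arena_t* same = gArenas.GetById(arena->mId); // look an arena up by id
+//   gArenas.DisposeArena(arena);                 // unregister (leaks, see above)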
+
// ******
// Chunks.
static AddressRadixTree<(sizeof(void*) << 3) - CHUNK_2POW_DEFAULT> gChunkRTree;
// Protects chunk-related data structures.
static Mutex chunks_mtx;
// Trees of chunks that were previously allocated (trees differ only in node
@@ -1091,68 +1136,53 @@ static void* base_past_addr; // Addr imm
static extent_node_t* base_nodes;
static Mutex base_mtx;
static size_t base_mapped;
static size_t base_committed;
// ******
// Arenas.
-// A tree of all available arenas, arranged by id.
-// TODO: Move into arena_t as a static member when rb_tree doesn't depend on
-// the type being defined anymore.
-static RedBlackTree<arena_t, ArenaTreeTrait> gArenaTree;
-static unsigned narenas;
-static Mutex arenas_lock; // Protects arenas initialization.
-
// The arena associated with the current thread (per jemalloc_thread_local_arena)
// On OSX, __thread/thread_local circles back calling malloc to allocate storage
// on first access on each thread, which leads to an infinite loop, but
// pthread-based TLS somehow doesn't have this problem.
#if !defined(XP_DARWIN)
static MOZ_THREAD_LOCAL(arena_t*) thread_arena;
#else
static detail::ThreadLocal<arena_t*, detail::ThreadLocalKeyStorage>
thread_arena;
#endif
-// The main arena, which all threads default to until jemalloc_thread_local_arena
-// is called.
-static arena_t* gMainArena;
-
// *****************************
// Runtime configuration options.
const uint8_t kAllocJunk = 0xe4;
const uint8_t kAllocPoison = 0xe5;
#ifdef MOZ_DEBUG
static bool opt_junk = true;
static bool opt_zero = false;
#else
static const bool opt_junk = false;
static const bool opt_zero = false;
#endif
-static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
-
// ***************************************************************************
// Begin forward declarations.
static void*
chunk_alloc(size_t aSize,
size_t aAlignment,
bool aBase,
bool* aZeroed = nullptr);
static void
chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType);
static void
chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed);
-static arena_t*
-arenas_extend();
static void*
huge_malloc(size_t size, bool zero);
static void*
huge_palloc(size_t aSize, size_t aAlignment, bool aZero);
static void*
huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize);
static void
huge_dalloc(void* aPtr);
@@ -2142,19 +2172,19 @@ thread_local_arena(bool enabled)
{
arena_t* arena;
if (enabled) {
// The arena will essentially be leaked if this function is
    // called with `false`, but it doesn't matter at the moment,
// because in practice nothing actually calls this function
// with `false`, except maybe at shutdown.
- arena = arenas_extend();
+ arena = gArenas.CreateArena();
} else {
- arena = gMainArena;
+ arena = gArenas.GetDefault();
}
thread_arena.set(arena);
return arena;
}
template<>
inline void
MozJemalloc::jemalloc_thread_local_arena(bool aEnabled)
@@ -3807,48 +3837,38 @@ arena_t::arena_t()
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
mMagic = ARENA_MAGIC;
#endif
}
-static inline arena_t*
-arenas_fallback()
+arena_t*
+ArenaCollection::CreateArena()
{
- // Only reached if there is an OOM error.
-
- // OOM here is quite inconvenient to propagate, since dealing with it
- // would require a check for failure in the fast path. Instead, punt
- // by using the first arena.
- // In practice, this is an extremely unlikely failure.
- _malloc_message(_getprogname(), ": (malloc) Error initializing arena\n");
-
- return gMainArena;
-}
-
-// Create a new arena and return it.
-static arena_t*
-arenas_extend()
-{
- arena_t* ret;
fallible_t fallible;
-
- ret = new (fallible) arena_t();
+ arena_t* ret = new (fallible) arena_t();
if (!ret) {
- return arenas_fallback();
- }
-
- MutexAutoLock lock(arenas_lock);
+ // Only reached if there is an OOM error.
+
+ // OOM here is quite inconvenient to propagate, since dealing with it
+ // would require a check for failure in the fast path. Instead, punt
+ // by using the first arena.
+ // In practice, this is an extremely unlikely failure.
+ _malloc_message(_getprogname(), ": (malloc) Error initializing arena\n");
+
+ return mDefaultArena;
+ }
+
+ MutexAutoLock lock(mLock);
// TODO: Use random Ids.
- ret->mId = narenas++;
- gArenaTree.Insert(ret);
-
+ ret->mId = mLastArenaId++;
+ mArenas.Insert(ret);
return ret;
}
// End arena.
// ***************************************************************************
// Begin general internal functions.
static void*
@@ -4218,31 +4238,23 @@ static
huge_mapped = 0;
// Initialize base allocation data structures.
base_mapped = 0;
base_committed = 0;
base_nodes = nullptr;
base_mtx.Init();
- arenas_lock.Init();
-
- // Initialize one arena here.
- gArenaTree.Init();
- arenas_extend();
- gMainArena = gArenaTree.First();
- if (!gMainArena) {
+  // Initialize the arena collection.
+ if (!gArenas.Init()) {
return false;
}
- // arena_t constructor sets this to a lower value for thread local arenas;
- // reset to the default value for the main arena.
- gMainArena->mMaxDirty = opt_dirty_max;
-
- // Assign the initial arena to the initial thread.
- thread_arena.set(gMainArena);
+
+ // Assign the default arena to the initial thread.
+ thread_arena.set(gArenas.GetDefault());
if (!gChunkRTree.Init()) {
return false;
}
malloc_initialized = true;
// Dummy call so that the function is not removed by dead-code elimination
@@ -4530,25 +4542,25 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
if (!malloc_initialized) {
memset(aStats, 0, sizeof(*aStats));
return;
}
// Gather runtime settings.
aStats->opt_junk = opt_junk;
aStats->opt_zero = opt_zero;
- aStats->narenas = narenas;
aStats->quantum = quantum;
aStats->small_max = small_max;
aStats->large_max = arena_maxclass;
aStats->chunksize = chunksize;
aStats->page_size = pagesize;
aStats->dirty_max = opt_dirty_max;
// Gather current memory usage statistics.
+ aStats->narenas = 0;
aStats->mapped = 0;
aStats->allocated = 0;
aStats->waste = 0;
aStats->page_cache = 0;
aStats->bookkeeping = 0;
aStats->bin_unused = 0;
non_arena_mapped = 0;
@@ -4564,27 +4576,23 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
// Get base mapped/allocated.
{
MutexAutoLock lock(base_mtx);
non_arena_mapped += base_mapped;
aStats->bookkeeping += base_committed;
MOZ_ASSERT(base_mapped >= base_committed);
}
- arenas_lock.Lock();
+ gArenas.mLock.Lock();
// Iterate over arenas.
- for (auto arena : gArenaTree.iter()) {
+ for (auto arena : gArenas.iter()) {
size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
arena_unused, arena_headers;
arena_run_t* run;
- if (!arena) {
- continue;
- }
-
arena_headers = 0;
arena_unused = 0;
{
MutexAutoLock lock(arena->mLock);
arena_mapped = arena->mStats.mapped;
@@ -4621,18 +4629,19 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
// allocated.
aStats->mapped += arena_mapped;
aStats->allocated += arena_allocated;
aStats->page_cache += arena_dirty;
aStats->waste += arena_committed - arena_allocated - arena_dirty -
arena_unused - arena_headers;
aStats->bin_unused += arena_unused;
aStats->bookkeeping += arena_headers;
- }
- arenas_lock.Unlock();
+ aStats->narenas++;
+ }
+ gArenas.mLock.Unlock();
// Account for arena chunk headers in bookkeeping rather than waste.
chunk_header_size =
((aStats->mapped / aStats->chunksize) * arena_chunk_header_npages)
<< pagesize_2pow;
aStats->mapped += non_arena_mapped;
aStats->bookkeeping += chunk_header_size;
@@ -4686,18 +4695,18 @@ arena_t::HardPurge()
}
}
template<>
inline void
MozJemalloc::jemalloc_purge_freed_pages()
{
if (malloc_initialized) {
- MutexAutoLock lock(arenas_lock);
- for (auto arena : gArenaTree.iter()) {
+ MutexAutoLock lock(gArenas.mLock);
+ for (auto arena : gArenas.iter()) {
arena->HardPurge();
}
}
}
#else // !defined MALLOC_DOUBLE_PURGE
template<>
@@ -4709,72 +4718,68 @@ MozJemalloc::jemalloc_purge_freed_pages(
#endif // defined MALLOC_DOUBLE_PURGE
template<>
inline void
MozJemalloc::jemalloc_free_dirty_pages(void)
{
if (malloc_initialized) {
- MutexAutoLock lock(arenas_lock);
- for (auto arena : gArenaTree.iter()) {
+ MutexAutoLock lock(gArenas.mLock);
+ for (auto arena : gArenas.iter()) {
MutexAutoLock arena_lock(arena->mLock);
arena->Purge(true);
}
}
}
inline arena_t*
-arena_t::GetById(arena_id_t aArenaId)
+ArenaCollection::GetById(arena_id_t aArenaId)
{
if (!malloc_initialized) {
return nullptr;
}
// Use AlignedStorage2 to avoid running the arena_t constructor, while
// we only need it as a placeholder for mId.
mozilla::AlignedStorage2<arena_t> key;
key.addr()->mId = aArenaId;
- MutexAutoLock lock(arenas_lock);
- arena_t* result = gArenaTree.Search(key.addr());
+ MutexAutoLock lock(mLock);
+ arena_t* result = mArenas.Search(key.addr());
MOZ_RELEASE_ASSERT(result);
return result;
}
#ifdef NIGHTLY_BUILD
template<>
inline arena_id_t
MozJemalloc::moz_create_arena()
{
if (malloc_init()) {
- arena_t* arena = arenas_extend();
+ arena_t* arena = gArenas.CreateArena();
return arena->mId;
}
return 0;
}
template<>
inline void
MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
{
- arena_t* arena = arena_t::GetById(aArenaId);
+ arena_t* arena = gArenas.GetById(aArenaId);
if (arena) {
- MutexAutoLock lock(arenas_lock);
- gArenaTree.Remove(arena);
- // The arena is leaked, and remaining allocations in it still are alive
- // until they are freed. After that, the arena will be empty but still
- // taking have at least a chunk taking address space. TODO: bug 1364359.
+ gArenas.DisposeArena(arena);
}
}
#define MALLOC_DECL(name, return_type, ...) \
template<> \
inline return_type MozJemalloc::moz_arena_##name( \
arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
{ \
- BaseAllocator allocator(arena_t::GetById(aArenaId)); \
+ BaseAllocator allocator(gArenas.GetById(aArenaId)); \
return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
#else
#define MALLOC_DECL(name, return_type, ...) \
@@ -4797,19 +4802,19 @@ MozJemalloc::moz_dispose_arena(arena_id_
// is threaded here.
#ifndef XP_DARWIN
static
#endif
void
_malloc_prefork(void)
{
// Acquire all mutexes in a safe order.
- arenas_lock.Lock();
-
- for (auto arena : gArenaTree.iter()) {
+ gArenas.mLock.Lock();
+
+ for (auto arena : gArenas.iter()) {
arena->mLock.Lock();
}
base_mtx.Lock();
huge_mtx.Lock();
}
@@ -4819,37 +4824,39 @@ static
void
_malloc_postfork_parent(void)
{
// Release all mutexes, now that fork() has completed.
huge_mtx.Unlock();
base_mtx.Unlock();
- for (auto arena : gArenaTree.iter()) {
+ for (auto arena : gArenas.iter()) {
arena->mLock.Unlock();
}
- arenas_lock.Unlock();
+
+ gArenas.mLock.Unlock();
}
#ifndef XP_DARWIN
static
#endif
void
_malloc_postfork_child(void)
{
// Reinitialize all mutexes, now that fork() has completed.
huge_mtx.Init();
base_mtx.Init();
- for (auto arena : gArenaTree.iter()) {
+ for (auto arena : gArenas.iter()) {
arena->mLock.Init();
}
- arenas_lock.Init();
+
+ gArenas.mLock.Init();
}
// End library-private functions.
// ***************************************************************************
#ifdef MOZ_REPLACE_MALLOC
// Windows doesn't come with weak imports as they are possible with
// LD_PRELOAD or DYLD_INSERT_LIBRARIES on Linux/OSX. On this platform,
// the replacement functions are defined as variable pointers to the