--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -694,16 +694,20 @@ struct arena_bin_t {
};
struct arena_t {
#if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
uint32_t mMagic;
# define ARENA_MAGIC 0x947d3d24
#endif
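+  /* Unique id for this arena; the key used for lookups in the tree of
+   * arenas. */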
+ arena_id_t mId;
+ /* Linkage for the tree of arenas by id. */
+ rb_node(arena_t) mLink;
+
/* All operations on this arena require that lock be locked. */
malloc_spinlock_t mLock;
arena_stats_t mStats;
private:
/* Tree of dirty-page-containing chunks this arena manages. */
arena_chunk_tree_t mChunksDirty;
@@ -769,16 +773,18 @@ public:
* 35 | 1024 |
* 36 | 2048 |
* --------+------+
*/
arena_bin_t mBins[1]; /* Dynamically sized. */
bool Init();
+ static inline arena_t* GetById(arena_id_t aArenaId);
+
private:
void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
void DeallocChunk(arena_chunk_t* aChunk);
arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);
void DallocRun(arena_run_t* aRun, bool aDirty);
@@ -812,16 +818,18 @@ public:
bool RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize, size_t aOldSize);
void Purge(bool aAll);
void HardPurge();
};
+typedef rb_tree(arena_t) arena_tree_t;
+
/******************************************************************************/
/*
* Data.
*/
/*
* When MALLOC_STATIC_SIZES is defined, most of the parameters
* controlling the malloc behavior are defined as compile-time constants
@@ -996,18 +1004,22 @@ static size_t base_committed;
/*
* Arenas.
*/
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
*/
-static arena_t **arenas;
-static unsigned narenas;
+static arena_t** arenas;
+// A tree of arenas, arranged by id.
+// TODO: Move into arena_t as a static member when rb_tree doesn't depend on
+// the type being defined anymore.
+static arena_tree_t gArenaTree;
+static unsigned narenas;
static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
#ifndef NO_TLS
/*
* The arena associated with the current thread (per jemalloc_thread_local_arena).
* On OSX, __thread/thread_local circles back calling malloc to allocate storage
* on first access on each thread, which leads to an infinite loop, but
* pthread-based TLS somehow doesn't have this problem.
@@ -2352,16 +2364,28 @@ choose_arena(size_t size)
#else
ret = arenas[0];
#endif
MOZ_DIAGNOSTIC_ASSERT(ret);
return (ret);
}
static inline int
+arena_comp(arena_t* a, arena_t* b)
+{
+ MOZ_ASSERT(a);
+ MOZ_ASSERT(b);
+
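+  /* Branchless three-way comparison: evaluates to -1, 0 or 1. */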
+ return (a->mId > b->mId) - (a->mId < b->mId);
+}
+
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, arena_tree_, arena_tree_t, arena_t, mLink, arena_comp)
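+/*
+ * rb_wrap generates the arena_tree_new, arena_tree_insert, arena_tree_remove
+ * and arena_tree_search functions used below, all of which order arenas by
+ * mId through arena_comp.
+ */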
+
+static inline int
arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
{
uintptr_t a_chunk = (uintptr_t)a;
uintptr_t b_chunk = (uintptr_t)b;
MOZ_ASSERT(a);
MOZ_ASSERT(b);
@@ -4032,16 +4056,17 @@ arena_t::Init()
{
unsigned i;
arena_bin_t* bin;
size_t prev_run_size;
if (malloc_spin_init(&mLock))
return true;
+ memset(&mLink, 0, sizeof(mLink));
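+  /* The tree link is zeroed here and properly initialized when the arena is
+   * inserted into gArenaTree by arenas_extend(). */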
memset(&mStats, 0, sizeof(arena_stats_t));
/* Initialize chunks. */
arena_chunk_tree_dirty_new(&mChunksDirty);
#ifdef MALLOC_DOUBLE_PURGE
new (&mChunksMAdvised) mozilla::DoublyLinkedList<arena_chunk_t>();
#endif
mSpare = nullptr;
@@ -4115,62 +4140,65 @@ arenas_fallback()
*/
_malloc_message(_getprogname(),
": (malloc) Error initializing arena\n");
return arenas[0];
}
/* Create a new arena and return it. */
-static arena_t *
+static arena_t*
arenas_extend()
{
- /*
- * The list of arenas is first allocated to contain at most 16 elements,
- * and when the limit is reached, the list is grown such that it can
- * contain 16 more elements.
- */
- const size_t arenas_growth = 16;
- arena_t *ret;
-
-
- /* Allocate enough space for trailing bins. */
- ret = (arena_t *)base_alloc(sizeof(arena_t)
- + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
- if (!ret || ret->Init()) {
- return arenas_fallback();
- }
-
- malloc_spin_lock(&arenas_lock);
-
- /* Allocate and initialize arenas. */
- if (narenas % arenas_growth == 0) {
- size_t max_arenas = ((narenas + arenas_growth) / arenas_growth) * arenas_growth;
- /*
- * We're unfortunately leaking the previous allocation ;
- * the base allocator doesn't know how to free things
- */
- arena_t** new_arenas = (arena_t **)base_alloc(sizeof(arena_t *) * max_arenas);
- if (!new_arenas) {
- ret = arenas ? arenas_fallback() : nullptr;
- malloc_spin_unlock(&arenas_lock);
- return (ret);
- }
- memcpy(new_arenas, arenas, narenas * sizeof(arena_t *));
- /*
- * Zero the array. In practice, this should always be pre-zeroed,
- * since it was just mmap()ed, but let's be sure.
- */
- memset(new_arenas + narenas, 0, sizeof(arena_t *) * (max_arenas - narenas));
- arenas = new_arenas;
- }
- arenas[narenas++] = ret;
-
- malloc_spin_unlock(&arenas_lock);
- return (ret);
+ /*
+ * The list of arenas is first allocated to contain at most 16 elements,
+ * and when the limit is reached, the list is grown such that it can
+ * contain 16 more elements.
+ */
+ const size_t arenas_growth = 16;
+ arena_t* ret;
+
+ /* Allocate enough space for trailing bins. */
+  ret = (arena_t*)base_alloc(sizeof(arena_t)
+ + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
+ if (!ret || ret->Init()) {
+ return arenas_fallback();
+ }
+
+ malloc_spin_lock(&arenas_lock);
+
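+  // The id is assigned and the arena inserted into gArenaTree while holding
+  // arenas_lock, so concurrently created arenas get distinct ids.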
+ // TODO: Use random Ids.
+ ret->mId = narenas;
+ arena_tree_insert(&gArenaTree, ret);
+
+ /* Allocate and initialize arenas. */
+ if (narenas % arenas_growth == 0) {
+ size_t max_arenas = ((narenas + arenas_growth) / arenas_growth) * arenas_growth;
+ /*
+     * We're unfortunately leaking the previous allocation; the base
+     * allocator doesn't know how to free things.
+ */
+ arena_t** new_arenas = (arena_t**)base_alloc(sizeof(arena_t*) * max_arenas);
+ if (!new_arenas) {
+ ret = arenas ? arenas_fallback() : nullptr;
+ malloc_spin_unlock(&arenas_lock);
+ return ret;
+ }
+ memcpy(new_arenas, arenas, narenas * sizeof(arena_t*));
+ /*
+ * Zero the array. In practice, this should always be pre-zeroed,
+ * since it was just mmap()ed, but let's be sure.
+ */
+ memset(new_arenas + narenas, 0, sizeof(arena_t*) * (max_arenas - narenas));
+ arenas = new_arenas;
+ }
+ arenas[narenas++] = ret;
+
+ malloc_spin_unlock(&arenas_lock);
+ return ret;
}
/*
* End arena.
*/
/******************************************************************************/
/*
* Begin general internal functions.
@@ -4628,16 +4656,17 @@ MALLOC_OUT:
base_nodes = nullptr;
malloc_mutex_init(&base_mtx);
malloc_spin_init(&arenas_lock);
/*
* Initialize one arena here.
*/
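+  /* The tree of arenas must be initialized before arenas_extend() can
+   * insert into it. */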
+ arena_tree_new(&gArenaTree);
arenas_extend();
if (!arenas || !arenas[0]) {
#ifndef XP_WIN
malloc_mutex_unlock(&init_lock);
#endif
return true;
}
/* arena_t::Init() sets this to a lower value for thread local arenas;
@@ -5151,25 +5180,71 @@ MozJemalloc::jemalloc_free_dirty_pages(v
malloc_spin_lock(&arena->mLock);
arena->Purge(true);
malloc_spin_unlock(&arena->mLock);
}
}
malloc_spin_unlock(&arenas_lock);
}
+inline arena_t*
+arena_t::GetById(arena_id_t aArenaId)
+{
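+  /* Look up by id using a stack-allocated key; arena_comp only examines
+   * mId, so the rest of the dummy arena_t can stay uninitialized. */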
+ arena_t key;
+ key.mId = aArenaId;
+ malloc_spin_lock(&arenas_lock);
+ arena_t* result = arena_tree_search(&gArenaTree, &key);
+ malloc_spin_unlock(&arenas_lock);
+ MOZ_RELEASE_ASSERT(result);
+ return result;
+}
+
+#ifdef NIGHTLY_BUILD
+template<> inline arena_id_t
+MozJemalloc::moz_create_arena()
+{
+ arena_t* arena = arenas_extend();
+ return arena->mId;
+}
+
+template<> inline void
+MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
+{
+ arena_t* arena = arena_t::GetById(aArenaId);
+ malloc_spin_lock(&arenas_lock);
+ arena_tree_remove(&gArenaTree, arena);
+  // The arena is leaked, and remaining allocations in it are still alive
+  // until they are freed. After that, the arena will be empty, but will
+  // still have at least one chunk taking up address space. TODO: bug 1364359.
+ malloc_spin_unlock(&arenas_lock);
+}
+
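+// Expand the moz_arena_* variants of the base allocation functions. Each
+// one looks up the arena by id once, then forwards its arguments to a
+// BaseAllocator bound to that arena.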
+#define MALLOC_DECL(name, return_type, ...) \
+ template<> inline return_type \
+ MozJemalloc::moz_arena_ ## name(arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
+ { \
+ BaseAllocator allocator(arena_t::GetById(aArenaId)); \
+ return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
+ }
+#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
+#include "malloc_decls.h"
+
+#else
+
#define MALLOC_DECL(name, return_type, ...) \
template<> inline return_type \
MozJemalloc::name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
{ \
return DummyArenaAllocator<MozJemalloc>::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#define MALLOC_FUNCS MALLOC_FUNCS_ARENA
#include "malloc_decls.h"
+#endif
+
/*
* End non-standard functions.
*/
/******************************************************************************/
/*
* Begin library-private functions, used by threading libraries for protection
* of malloc during fork(). These functions are only called if the program is
* running in threaded mode, so there is no need to check whether the program