Bug 1418104 - Allow passing parameters when creating arenas. r?njn
The immediate goal for this is to allow determinism in an arena used for
an upcoming test, by essentially disabling purge on that specific arena.
We do that by allowing arenas to be created with a specific setting for
mMaxDirty.
Incidentally, this allows cleaning up the mMaxDirty initialization for
thread-local arenas.
Longer term, this would allow tweaking arenas with more parameters, on
a per-arena basis.
--- a/memory/build/malloc_decls.h
+++ b/memory/build/malloc_decls.h
@@ -97,18 +97,19 @@ MALLOC_DECL(jemalloc_thread_local_arena,
// Provide information about any allocation enclosing the given address.
MALLOC_DECL(jemalloc_ptr_info, void, const void*, jemalloc_ptr_info_t*)
#endif
#if MALLOC_FUNCS & MALLOC_FUNCS_ARENA_BASE
// Creates a separate arena, and returns its id, valid to use with moz_arena_*
-// functions.
-MALLOC_DECL(moz_create_arena, arena_id_t)
+// functions. A helper that doesn't take an arena_params_t argument is
+// provided in mozmemory.h: moz_create_arena.
+MALLOC_DECL(moz_create_arena_with_params, arena_id_t, arena_params_t*)
// Dispose of the given arena. Subsequent uses of the arena will crash.
// Passing an invalid id (inexistent or already disposed) to this function
// will crash.
MALLOC_DECL(moz_dispose_arena, void, arena_id_t)
#endif
#if MALLOC_FUNCS & MALLOC_FUNCS_ARENA_ALLOC
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -994,17 +994,17 @@ public:
// 33 | 496 |
// 34 | 512 |
// --------+------+
// 35 | 1024 |
// 36 | 2048 |
// --------+------+
arena_bin_t mBins[1]; // Dynamically sized.
- arena_t();
+ explicit arena_t(arena_params_t* aParams);
private:
void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
void DeallocChunk(arena_chunk_t* aChunk);
arena_run_t* AllocRun(size_t aSize, bool aLarge, bool aZero);
@@ -1096,29 +1096,27 @@ struct ArenaTreeTrait
// used by the standard API.
class ArenaCollection
{
public:
bool Init()
{
mArenas.Init();
mPrivateArenas.Init();
+ arena_params_t params;
+ // The main arena allows more dirty pages than the default for other arenas.
+ params.mMaxDirty = opt_dirty_max;
mDefaultArena =
- mLock.Init() ? CreateArena(/* IsPrivate = */ false) : nullptr;
- if (mDefaultArena) {
- // arena_t constructor sets this to a lower value for thread local
- // arenas; Reset to the default value for the main arena.
- mDefaultArena->mMaxDirty = opt_dirty_max;
- }
+ mLock.Init() ? CreateArena(/* IsPrivate = */ false, ¶ms) : nullptr;
return bool(mDefaultArena);
}
inline arena_t* GetById(arena_id_t aArenaId, bool aIsPrivate);
- arena_t* CreateArena(bool aIsPrivate);
+ arena_t* CreateArena(bool aIsPrivate, arena_params_t* aParams);
void DisposeArena(arena_t* aArena)
{
MutexAutoLock lock(mLock);
(mPrivateArenas.Search(aArena) ? mPrivateArenas : mArenas).Remove(aArena);
// The arena is leaked, and remaining allocations in it still are alive
// until they are freed. After that, the arena will be empty but still
// taking have at least a chunk taking address space. TODO: bug 1364359.
@@ -2215,17 +2213,18 @@ thread_local_arena(bool enabled)
{
arena_t* arena;
if (enabled) {
// The arena will essentially be leaked if this function is
// called with `false`, but it doesn't matter at the moment.
// because in practice nothing actually calls this function
// with `false`, except maybe at shutdown.
- arena = gArenas.CreateArena(/* IsPrivate = */ false);
+ arena =
+ gArenas.CreateArena(/* IsPrivate = */ false, /* Params = */ nullptr);
} else {
arena = gArenas.GetDefault();
}
thread_arena.set(arena);
return arena;
}
template<>
@@ -3760,36 +3759,38 @@ iralloc(void* aPtr, size_t aSize, arena_
aArena = aArena ? aArena : arena;
size_t oldsize = info.Size();
MOZ_DIAGNOSTIC_ASSERT(aArena->mMagic == ARENA_MAGIC);
return (aSize <= gMaxLargeClass) ? arena_ralloc(aPtr, aSize, oldsize, aArena)
: huge_ralloc(aPtr, aSize, oldsize, aArena);
}
-arena_t::arena_t()
+arena_t::arena_t(arena_params_t* aParams)
{
unsigned i;
MOZ_RELEASE_ASSERT(mLock.Init());
memset(&mLink, 0, sizeof(mLink));
memset(&mStats, 0, sizeof(arena_stats_t));
// Initialize chunks.
mChunksDirty.Init();
#ifdef MALLOC_DOUBLE_PURGE
new (&mChunksMAdvised) DoublyLinkedList<arena_chunk_t>();
#endif
mSpare = nullptr;
mNumDirty = 0;
- // Reduce the maximum amount of dirty pages we allow to be kept on
- // thread local arenas. TODO: make this more flexible.
- mMaxDirty = opt_dirty_max >> 3;
+
+ // The default maximum number of dirty pages allowed on arenas is a fraction
+ // of opt_dirty_max.
+ mMaxDirty =
+ (aParams && aParams->mMaxDirty) ? aParams->mMaxDirty : (opt_dirty_max / 8);
mRunsAvail.Init();
// Initialize bins.
SizeClass sizeClass(1);
for (i = 0;; i++) {
arena_bin_t& bin = mBins[i];
@@ -3805,20 +3806,20 @@ arena_t::arena_t()
kNumTinyClasses + kNumQuantumClasses + gNumSubPageClasses - 1);
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
mMagic = ARENA_MAGIC;
#endif
}
arena_t*
-ArenaCollection::CreateArena(bool aIsPrivate)
+ArenaCollection::CreateArena(bool aIsPrivate, arena_params_t* aParams)
{
fallible_t fallible;
- arena_t* ret = new (fallible) arena_t();
+ arena_t* ret = new (fallible) arena_t(aParams);
if (!ret) {
// Only reached if there is an OOM error.
// OOM here is quite inconvenient to propagate, since dealing with it
// would require a check for failure in the fast path. Instead, punt
// by using the first arena.
// In practice, this is an extremely unlikely failure.
_malloc_message(_getprogname(), ": (malloc) Error initializing arena\n");
@@ -4677,20 +4678,20 @@ ArenaCollection::GetById(arena_id_t aAre
arena_t* result = (aIsPrivate ? mPrivateArenas : mArenas).Search(key.addr());
MOZ_RELEASE_ASSERT(result);
return result;
}
#ifdef NIGHTLY_BUILD
template<>
inline arena_id_t
-MozJemalloc::moz_create_arena()
+MozJemalloc::moz_create_arena_with_params(arena_params_t* aParams)
{
if (malloc_init()) {
- arena_t* arena = gArenas.CreateArena(/* IsPrivate = */ true);
+ arena_t* arena = gArenas.CreateArena(/* IsPrivate = */ true, aParams);
return arena->mId;
}
return 0;
}
template<>
inline void
MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
@@ -4945,17 +4946,18 @@ replace_malloc_init_funcs()
if (!replace_malloc_table.aligned_alloc && replace_malloc_table.memalign) {
replace_malloc_table.aligned_alloc =
AlignedAllocator<ReplaceMalloc::memalign>::aligned_alloc;
}
if (!replace_malloc_table.valloc && replace_malloc_table.memalign) {
replace_malloc_table.valloc =
AlignedAllocator<ReplaceMalloc::memalign>::valloc;
}
- if (!replace_malloc_table.moz_create_arena && replace_malloc_table.malloc) {
+ if (!replace_malloc_table.moz_create_arena_with_params &&
+ replace_malloc_table.malloc) {
#define MALLOC_DECL(name, ...) \
replace_malloc_table.name = DummyArenaAllocator<ReplaceMalloc>::name;
#define MALLOC_FUNCS MALLOC_FUNCS_ARENA
#include "malloc_decls.h"
}
#define MALLOC_DECL(name, ...) \
if (!replace_malloc_table.name) { \
--- a/memory/build/mozjemalloc.h
+++ b/memory/build/mozjemalloc.h
@@ -62,17 +62,17 @@ typedef MozJemalloc DefaultMalloc;
#endif // MOZ_MEMORY
// Dummy implementation of the moz_arena_* API, falling back to a given
// implementation of the base allocator.
template<typename T>
struct DummyArenaAllocator
{
- static arena_id_t moz_create_arena(void) { return 0; }
+ static arena_id_t moz_create_arena_with_params(arena_params_t*) { return 0; }
static void moz_dispose_arena(arena_id_t) {}
#define MALLOC_DECL(name, return_type, ...) \
static return_type moz_arena_##name(arena_id_t, \
ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
{ \
return T::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
--- a/memory/build/mozjemalloc_types.h
+++ b/memory/build/mozjemalloc_types.h
@@ -51,16 +51,28 @@ extern "C" {
#ifndef MALLOC_USABLE_SIZE_CONST_PTR
#define MALLOC_USABLE_SIZE_CONST_PTR const
#endif
typedef MALLOC_USABLE_SIZE_CONST_PTR void* usable_ptr_t;
typedef size_t arena_id_t;
+typedef struct arena_params_s
+{
+ size_t mMaxDirty;
+
+#ifdef __cplusplus
+ arena_params_s()
+ : mMaxDirty(0)
+ {
+ }
+#endif
+} arena_params_t;
+
// jemalloc_stats() is not a stable interface. When using jemalloc_stats_t, be
// sure that the compiled results of jemalloc.c are in sync with this header
// file.
typedef struct
{
// Run-time configuration settings.
bool opt_junk; // Fill allocated memory with kAllocJunk?
bool opt_zero; // Fill allocated memory with 0x0?
--- a/memory/build/mozmemory.h
+++ b/memory/build/mozmemory.h
@@ -54,9 +54,15 @@ static inline size_t
#endif
#define MALLOC_DECL(name, return_type, ...) \
MOZ_JEMALLOC_API return_type name(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_ARENA
#include "malloc_decls.h"
+#ifdef __cplusplus
+#define moz_create_arena() moz_create_arena_with_params(nullptr)
+#else
+#define moz_create_arena() moz_create_arena_with_params(NULL)
+#endif
+
#endif // mozmemory_h