--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -103,24 +103,26 @@
// Huge : Each allocation is backed by a dedicated contiguous set of chunks.
// Metadata are stored in a separate red-black tree.
//
// *****************************************************************************
#include "mozmemory_wrap.h"
#include "mozjemalloc.h"
#include "mozilla/Atomics.h"
+#include "mozilla/Alignment.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/DoublyLinkedList.h"
#include "mozilla/GuardObjects.h"
#include "mozilla/Likely.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Sprintf.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Unused.h"
+#include "mozilla/fallible.h"
#include "Utils.h"
// On Linux, we use madvise(MADV_DONTNEED) to release memory back to the
// operating system. If we release 1MB of live pages with MADV_DONTNEED, our
// RSS will decrease by 1MB (almost) immediately.
//
// On Mac, we use madvise(MADV_FREE). Unlike MADV_DONTNEED on Linux, MADV_FREE
// on Mac doesn't cause the OS to release the specified pages immediately; the
@@ -443,16 +445,19 @@ static const size_t gRecycleLimit = CHUN
static Atomic<size_t, ReleaseAcquire> gRecycledSize;
// ***************************************************************************
// MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
#if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
#error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
#endif
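+// Forward declaration: base_alloc is defined later in this file, but
+// arena_t's fallible operator new below needs it to allocate arenas.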
+static void*
+base_alloc(size_t aSize);
+
// Mutexes based on spinlocks. We can't use normal pthread spinlocks in all
// places, because they require malloc()ed memory, which causes bootstrapping
// issues in some cases.
struct Mutex
{
#if defined(XP_WIN)
CRITICAL_SECTION mMutex;
#elif defined(XP_DARWIN)
@@ -945,17 +950,17 @@ public:
// 33 | 496 |
// 34 | 512 |
// --------+------+
// 35 | 1024 |
// 36 | 2048 |
// --------+------+
arena_bin_t mBins[1]; // Dynamically sized.
- bool Init();
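+  // Unlike the old Init(), a constructor can't report failure to its
+  // caller, so mutex initialization failure becomes a release assert.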
+ arena_t();
static inline arena_t* GetById(arena_id_t aArenaId);
private:
void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
void DeallocChunk(arena_chunk_t* aChunk);
@@ -1011,16 +1016,31 @@ public:
bool RallocGrowLarge(arena_chunk_t* aChunk,
void* aPtr,
size_t aSize,
size_t aOldSize);
void Purge(bool aAll);
void HardPurge();
+
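+  // Plain new is deleted: arenas must be allocated through the fallible,
+  // base_alloc-backed operator new below.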
+ void* operator new(size_t aCount) = delete;
+
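+  // MSVC complains about noexcept when exceptions are disabled
+  // (_CPPUNWIND undefined), so only use it in other configurations.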
+ void* operator new(size_t aCount, const fallible_t&)
+#if !defined(_MSC_VER) || defined(_CPPUNWIND)
+ noexcept
+#endif
+ {
+ MOZ_ASSERT(aCount == sizeof(arena_t));
+ // Allocate enough space for trailing bins.
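+    // mBins is declared with a single element, but an arena really uses
+    // ntbins + nqbins + nsbins bins (see the size class table above).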
+ return base_alloc(aCount +
+ (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
+ }
+
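+  // base_alloc'd memory is never freed, so deleting an arena is not
+  // meaningful; forbid it.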
+ void operator delete(void*) = delete;
};
struct ArenaTreeTrait
{
static RedBlackTreeNode<arena_t>& GetTreeNode(arena_t* aThis)
{
return aThis->mLink;
}
@@ -3715,27 +3735,23 @@ iralloc(void* aPtr, size_t aSize, arena_
MOZ_ASSERT(aSize != 0);
oldsize = isalloc(aPtr);
return (aSize <= arena_maxclass) ? arena_ralloc(aPtr, aSize, oldsize, aArena)
: huge_ralloc(aPtr, aSize, oldsize);
}
-// Returns whether initialization succeeded.
-bool
-arena_t::Init()
+arena_t::arena_t()
{
unsigned i;
arena_bin_t* bin;
size_t prev_run_size;
-  if (!mLock.Init()) {
-    return false;
-  }
+  MOZ_RELEASE_ASSERT(mLock.Init());
memset(&mLink, 0, sizeof(mLink));
memset(&mStats, 0, sizeof(arena_stats_t));
// Initialize chunks.
mChunksDirty.Init();
#ifdef MALLOC_DOUBLE_PURGE
new (&mChunksMAdvised) DoublyLinkedList<arena_chunk_t>();
@@ -3789,18 +3805,16 @@ arena_t::Init()
prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
mMagic = ARENA_MAGIC;
#endif
-
- return true;
}
static inline arena_t*
arenas_fallback()
{
// Only reached if there is an OOM error.
// OOM here is quite inconvenient to propagate, since dealing with it
@@ -3812,21 +3826,20 @@ arenas_fallback()
return gMainArena;
}
// Create a new arena and return it.
static arena_t*
arenas_extend()
{
arena_t* ret;
-
- // Allocate enough space for trailing bins.
- ret = (arena_t*)base_alloc(
- sizeof(arena_t) + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
- if (!ret || !ret->Init()) {
+ fallible_t fallible;
+
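+  // This placement-new goes through arena_t's fallible operator new, which
+  // over-allocates for the trailing bins and returns null on OOM instead
+  // of throwing.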
+ ret = new (fallible) arena_t();
+ if (!ret) {
return arenas_fallback();
}
MutexAutoLock lock(arenas_lock);
// TODO: Use random Ids.
ret->mId = narenas++;
gArenaTree.Insert(ret);
@@ -4214,17 +4227,17 @@ static
// Initialize one arena here.
gArenaTree.Init();
arenas_extend();
gMainArena = gArenaTree.First();
if (!gMainArena) {
return false;
}
-  // arena_t::Init() sets this to a lower value for thread local arenas;
+  // The arena_t constructor sets this to a lower value for thread local arenas;
// reset to the default value for the main arena.
gMainArena->mMaxDirty = opt_dirty_max;
// Assign the initial arena to the initial thread.
thread_arena.set(gMainArena);
if (!gChunkRTree.Init()) {
return false;
@@ -4710,20 +4723,22 @@ MozJemalloc::jemalloc_free_dirty_pages(v
}
inline arena_t*
arena_t::GetById(arena_id_t aArenaId)
{
if (!malloc_initialized) {
return nullptr;
}
- arena_t key;
- key.mId = aArenaId;
+  // Use AlignedStorage2 to avoid running the arena_t constructor, since
+  // we only need the object as a placeholder for mId.
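+  // This only works because the tree comparator consults nothing but mId.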
+ mozilla::AlignedStorage2<arena_t> key;
+ key.addr()->mId = aArenaId;
MutexAutoLock lock(arenas_lock);
- arena_t* result = gArenaTree.Search(&key);
+ arena_t* result = gArenaTree.Search(key.addr());
MOZ_RELEASE_ASSERT(result);
return result;
}
#ifdef NIGHTLY_BUILD
template<>
inline arena_id_t
MozJemalloc::moz_create_arena()