--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -144,16 +144,18 @@
#include <errno.h>
#include <stdlib.h>
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <algorithm>
+using namespace mozilla;
+
#ifdef XP_WIN
// Some defines from the CRT internal headers that we need here.
#define _CRT_SPINCOUNT 5000
#include <io.h>
#include <windows.h>
#include <intrin.h>
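The patch's core change is the file-scope `using namespace mozilla;` directive added above, which lets every later `mozilla::` qualifier in this file be dropped. A standalone illustration of the trade-off, using a made-up `demo` namespace (nothing below is from the patch):

```cpp
#include <atomic>

namespace demo { template <typename T> using Atomic = std::atomic<T>; }

// The patch's approach: one directive makes every name in the namespace
// visible unqualified for the rest of the file.
using namespace demo;
static Atomic<bool> gReady{false};

// A narrower alternative would import names one at a time:
//   using demo::Atomic;
```

A using-directive is defensible in a .cpp file like mozjemalloc.cpp because it cannot leak into other translation units the way it would from a header.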
@@ -457,17 +459,17 @@ static size_t arena_maxclass; // Max siz
// Recycle at most 128 chunks. With 1 MiB chunks, that caps the recycle pool at
// 128 MiB, i.e. at most 6.25% of a 2 GiB 32-bit process address space retained
// for later use.
#define CHUNK_RECYCLE_LIMIT 128
static const size_t gRecycleLimit = CHUNK_RECYCLE_LIMIT * CHUNKSIZE_DEFAULT;
// The current amount of recycled bytes, updated atomically.
-static mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gRecycledSize;
+static Atomic<size_t, ReleaseAcquire> gRecycledSize;
// ***************************************************************************
// MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
#if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
#error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
#endif
// Mutexes based on spinlocks. We can't use normal pthread spinlocks in all
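gRecycledSize above is a ReleaseAcquire atomic counter that callers update as chunks enter and leave the recycle pool. A minimal standalone sketch of that accounting pattern, using std::atomic so it compiles outside Gecko; RecordRecycledChunk and kRecycleLimit are illustrative names, not from the patch:

```cpp
#include <atomic>
#include <cstddef>

static std::atomic<size_t> gRecycled{0};
static const size_t kRecycleLimit = 128 * (1 << 20); // 128 chunks of 1 MiB

// Try to account for a chunk entering the recycle pool; back out and refuse
// if that would push the pool past the limit.
bool RecordRecycledChunk(size_t aChunkSize) {
  // memory_order_acq_rel mirrors mozilla::Atomic's ReleaseAcquire ordering.
  size_t prev = gRecycled.fetch_add(aChunkSize, std::memory_order_acq_rel);
  if (prev + aChunkSize > kRecycleLimit) {
    gRecycled.fetch_sub(aChunkSize, std::memory_order_acq_rel);
    return false;
  }
  return true;
}
```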
@@ -502,17 +504,17 @@ struct MOZ_RAII MutexAutoLock
~MutexAutoLock() { mMutex.Unlock(); }
private:
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
Mutex& mMutex;
};
// Set to true once the allocator has been initialized.
-static mozilla::Atomic<bool> malloc_initialized(false);
+static Atomic<bool> malloc_initialized(false);
#if defined(XP_WIN)
// No init lock for Windows.
#elif defined(XP_DARWIN)
static Mutex gInitLock = { OS_SPINLOCK_INIT };
#elif defined(XP_LINUX) && !defined(ANDROID)
static Mutex gInitLock = { PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP };
#else
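MutexAutoLock above is a classic scope-based lock guard: the constructor takes the lock, the destructor releases it on every exit path. A self-contained analogue using std::mutex (the MOZ_RAII/guard-object annotations are Gecko-specific and omitted here):

```cpp
#include <mutex>

class AutoLock {
public:
  explicit AutoLock(std::mutex& aMutex) : mMutex(aMutex) { mMutex.lock(); }
  ~AutoLock() { mMutex.unlock(); }
  AutoLock(const AutoLock&) = delete;
  AutoLock& operator=(const AutoLock&) = delete;

private:
  std::mutex& mMutex;
};

static std::mutex gLock;
static int gCounter = 0;

void Bump() {
  AutoLock lock(gLock); // released automatically, even on early return
  ++gCounter;
}
```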
@@ -801,17 +803,17 @@ struct arena_chunk_t
#ifdef MALLOC_DOUBLE_PURGE
// If we're double-purging, we maintain a linked list of chunks whose pages
// have been madvise(MADV_FREE)'d but not explicitly purged.
//
// We're currently lazy and don't remove a chunk from this list when
// all its madvised pages are recommitted.
- mozilla::DoublyLinkedListElement<arena_chunk_t> chunks_madvised_elem;
+ DoublyLinkedListElement<arena_chunk_t> chunks_madvised_elem;
#endif
// Number of dirty pages.
size_t ndirty;
// Map of pages within the chunk, tracking free/large/small.
arena_chunk_map_t map[1]; // Dynamically sized.
};
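chunks_madvised_elem embeds the list links directly in arena_chunk_t, so putting a chunk on the madvised list allocates nothing, which matters inside an allocator. The real code hooks the member up through a custom accessor trait; the sketch below uses what I understand to be MFBT's simpler default form, where the element type inherits from DoublyLinkedListElement, and it only compiles in a tree that provides the mfbt headers:

```cpp
#include <cstddef>
#include "mozilla/DoublyLinkedList.h"

// Simplified stand-in for arena_chunk_t; inherits its list links.
struct Chunk : public mozilla::DoublyLinkedListElement<Chunk> {
  size_t ndirty = 0;
};

void Example() {
  mozilla::DoublyLinkedList<Chunk> madvised;
  Chunk a, b;
  madvised.pushBack(&a); // no allocation: links live inside the Chunk
  madvised.pushBack(&b);
  madvised.remove(&a);   // O(1) removal via the embedded links
}
```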
@@ -915,17 +917,17 @@ struct arena_t
private:
// Tree of dirty-page-containing chunks this arena manages.
RedBlackTree<arena_chunk_t, ArenaDirtyChunkTrait> mChunksDirty;
#ifdef MALLOC_DOUBLE_PURGE
// Head of a linked list of MADV_FREE'd-page-containing chunks this
// arena manages.
- mozilla::DoublyLinkedList<arena_chunk_t> mChunksMAdvised;
+ DoublyLinkedList<arena_chunk_t> mChunksMAdvised;
#endif
// In order to avoid rapid chunk allocation/deallocation when an arena
// oscillates right on the cusp of needing a new chunk, cache the most
// recently freed chunk. The spare is left in the arena's chunk trees
// until it is deleted.
//
// There is one spare chunk per arena, rather than one spare total, in
@@ -1107,18 +1109,17 @@ static Mutex arenas_lock; // Protects ar
// The arena associated with the current thread (per jemalloc_thread_local_arena).
// On OSX, __thread/thread_local circles back into malloc to allocate its storage
// on each thread's first access, which leads to an infinite loop; pthread-based
// TLS somehow doesn't have this problem.
#if !defined(XP_DARWIN)
static MOZ_THREAD_LOCAL(arena_t*) thread_arena;
#else
-static mozilla::detail::ThreadLocal<arena_t*,
- mozilla::detail::ThreadLocalKeyStorage>
+static detail::ThreadLocal<arena_t*, detail::ThreadLocalKeyStorage>
thread_arena;
#endif
// The main arena, which all threads default to until jemalloc_thread_local_arena
// is called.
static arena_t* gMainArena;
// *****************************
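A standalone sketch of the pthread-keyed fallback the comment above describes; per that comment, pthread TLS avoids the malloc re-entrancy that __thread/thread_local initialization triggers on OSX. The helper names are illustrative, not MFBT's ThreadLocalKeyStorage API:

```cpp
#include <pthread.h>

struct arena_t;

static pthread_key_t sArenaKey;

// Call once at startup, e.g. from the allocator's init path.
void InitThreadArenaKey() { pthread_key_create(&sArenaKey, nullptr); }

arena_t* GetThreadArena() {
  return static_cast<arena_t*>(pthread_getspecific(sArenaKey));
}

void SetThreadArena(arena_t* aArena) { pthread_setspecific(sArenaKey, aArena); }
```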
@@ -1512,17 +1513,17 @@ base_node_dealloc(extent_node_t* aNode)
base_nodes = aNode;
}
struct BaseNodeFreePolicy
{
void operator()(extent_node_t* aPtr) { base_node_dealloc(aPtr); }
};
-using UniqueBaseNode = mozilla::UniquePtr<extent_node_t, BaseNodeFreePolicy>;
+using UniqueBaseNode = UniquePtr<extent_node_t, BaseNodeFreePolicy>;
// End Utility functions/macros.
// ***************************************************************************
// Begin chunk management functions.
#ifdef XP_WIN
static void*
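UniqueBaseNode in the hunk above pairs UniquePtr with a policy whose operator() hands the node back to base_node_dealloc instead of calling free(). A standalone analogue with std::unique_ptr; the free list here is simplified to an explicit next pointer, whereas the real code threads the link through the node's own storage:

```cpp
#include <memory>

struct Node { Node* next = nullptr; };

static Node* gFreeNodes = nullptr;

struct NodeFreePolicy {
  void operator()(Node* aPtr) {
    aPtr->next = gFreeNodes; // push the node back on the free list
    gFreeNodes = aPtr;
  }
};

// UniqueNode recycles its node automatically when it goes out of scope,
// the same ownership shape UniqueBaseNode gives extent_node_t.
using UniqueNode = std::unique_ptr<Node, NodeFreePolicy>;
```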
@@ -2516,18 +2517,17 @@ arena_t::InitChunk(arena_chunk_t* aChunk
pages_decommit(run, arena_maxclass);
#endif
mStats.committed += arena_chunk_header_npages;
// Insert the run into the tree of available runs.
mRunsAvail.Insert(&aChunk->map[arena_chunk_header_npages]);
#ifdef MALLOC_DOUBLE_PURGE
- new (&aChunk->chunks_madvised_elem)
- mozilla::DoublyLinkedListElement<arena_chunk_t>();
+ new (&aChunk->chunks_madvised_elem) DoublyLinkedListElement<arena_chunk_t>();
#endif
}
void
arena_t::DeallocChunk(arena_chunk_t* aChunk)
{
if (mSpare) {
if (mSpare->ndirty > 0) {
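The placement-new this hunk adds to InitChunk re-runs the list element's constructor in place, since a chunk being (re)initialized may be recycled memory carrying stale link state. The same idiom in isolation:

```cpp
#include <new>

struct Links {
  Links* prev = nullptr;
  Links* next = nullptr;
};

struct ChunkHeader {
  Links listElem; // analogue of chunks_madvised_elem
};

void ReinitListElem(ChunkHeader* aChunk) {
  // No allocation: construct a fresh Links in the existing storage,
  // wiping whatever links a previous use of this memory left behind.
  new (&aChunk->listElem) Links();
}
```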
@@ -3779,17 +3779,17 @@ arena_t::Init()
}
memset(&mLink, 0, sizeof(mLink));
memset(&mStats, 0, sizeof(arena_stats_t));
// Initialize chunks.
mChunksDirty.Init();
#ifdef MALLOC_DOUBLE_PURGE
- new (&mChunksMAdvised) mozilla::DoublyLinkedList<arena_chunk_t>();
+ new (&mChunksMAdvised) DoublyLinkedList<arena_chunk_t>();
#endif
mSpare = nullptr;
mNumDirty = 0;
// Reduce the maximum number of dirty pages we allow to be kept on
// thread-local arenas. TODO: make this more flexible.
mMaxDirty = opt_dirty_max >> 3;
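The `opt_dirty_max >> 3` above is integer division by 8: each thread-local arena gets an eighth of the global dirty-page budget. With illustrative numbers:

```cpp
#include <cstddef>

size_t ThreadLocalDirtyMax(size_t aOptDirtyMax) {
  return aOptDirtyMax >> 3; // e.g. 1024 global dirty pages -> 128 per arena
}
```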
@@ -4701,18 +4701,18 @@ hard_purge_chunk(arena_chunk_t* aChunk)
aChunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
}
// We could use mincore to find out which pages are actually
// present, but it's not clear that's better.
if (npages > 0) {
pages_decommit(((char*)aChunk) + (i << pagesize_2pow),
npages << pagesize_2pow);
- mozilla::Unused << pages_commit(((char*)aChunk) + (i << pagesize_2pow),
- npages << pagesize_2pow);
+ Unused << pages_commit(((char*)aChunk) + (i << pagesize_2pow),
+ npages << pagesize_2pow);
}
i += npages;
}
}
// Explicitly remove all of this arena's MADV_FREE'd pages from memory.
void
arena_t::HardPurge()
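hard_purge_chunk forces MADV_FREE'd pages out of memory by decommitting and immediately recommitting them. A hedged POSIX sketch of the same intent, not mozjemalloc's actual pages_decommit/pages_commit: mapping fresh anonymous pages over a range keeps the address space reserved while guaranteeing the old contents are discarded, with the kernel supplying zero pages on the next touch.

```cpp
#include <cstddef>
#include <sys/mman.h>

// aAddr and aLen must be page-aligned; returns true on success.
bool HardPurgeRange(void* aAddr, size_t aLen) {
  void* r = mmap(aAddr, aLen, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
  return r == aAddr;
}
```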