--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -152,80 +152,80 @@
#ifdef XP_WIN
// Some defines from the CRT internal headers that we need here.
#define _CRT_SPINCOUNT 5000
#include <io.h>
#include <windows.h>
#include <intrin.h>
-#define SIZE_T_MAX SIZE_MAX
-#define STDERR_FILENO 2
+#define SIZE_T_MAX SIZE_MAX
+#define STDERR_FILENO 2
// Use MSVC intrinsics.
#pragma intrinsic(_BitScanForward)
static __forceinline int
ffs(int x)
{
- unsigned long i;
-
- if (_BitScanForward(&i, x) != 0) {
- return i + 1;
- }
- return 0;
+ unsigned long i;
+
+ if (_BitScanForward(&i, x) != 0) {
+ return i + 1;
+ }
+ return 0;
}
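For reference, a standalone sketch (illustrative only, not part of the patch) of the contract this shim preserves: ffs() returns the 1-based index of the least significant set bit, or 0 when no bit is set, which is what the _BitScanForward path above produces.

#include <cassert>

// Portable reference implementation of the same contract, with spot checks.
static int
ffs_reference(int x)
{
  unsigned v = static_cast<unsigned>(x);
  for (int i = 0; i < 32; i++) {
    if (v & (1u << i)) {
      return i + 1; // 1-based index of the lowest set bit.
    }
  }
  return 0; // No bit set.
}

int
main()
{
  assert(ffs_reference(0) == 0);
  assert(ffs_reference(1) == 1);
  assert(ffs_reference(0x80) == 8);
  assert(ffs_reference(0x30) == 5); // Lowest set bit is bit 4, so 5.
  return 0;
}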
// Implement getenv without using malloc.
static char mozillaMallocOptionsBuf[64];
-#define getenv xgetenv
-static char *
-getenv(const char *name)
+#define getenv xgetenv
+static char*
+getenv(const char* name)
{
- if (GetEnvironmentVariableA(name, mozillaMallocOptionsBuf,
- sizeof(mozillaMallocOptionsBuf)) > 0) {
- return mozillaMallocOptionsBuf;
- }
-
- return nullptr;
+ if (GetEnvironmentVariableA(
+ name, mozillaMallocOptionsBuf, sizeof(mozillaMallocOptionsBuf)) > 0) {
+ return mozillaMallocOptionsBuf;
+ }
+
+ return nullptr;
}
#if defined(_WIN64)
typedef long long ssize_t;
#else
typedef long ssize_t;
#endif
-#define MALLOC_DECOMMIT
+#define MALLOC_DECOMMIT
#endif
#ifndef XP_WIN
#ifndef XP_SOLARIS
#include <sys/cdefs.h>
#endif
#include <sys/mman.h>
#ifndef MADV_FREE
-# define MADV_FREE MADV_DONTNEED
+#define MADV_FREE MADV_DONTNEED
#endif
#ifndef MAP_NOSYNC
-# define MAP_NOSYNC 0
+#define MAP_NOSYNC 0
#endif
#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#if !defined(XP_SOLARIS) && !defined(ANDROID)
#include <sys/sysctl.h>
#endif
#include <sys/uio.h>
#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
-# define SIZE_T_MAX SIZE_MAX
+#define SIZE_T_MAX SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
@@ -254,134 +254,133 @@ typedef long ssize_t;
// up in a deadlock in jemalloc.
// On these systems, we prefer to directly use the system call.
// We do that for Linux systems and kfreebsd with GNU userland.
// Note that sanity checks are not done (alignment of offset, ...) because
// the uses of mmap in jemalloc are pretty limited.
//
// On Alpha, glibc has a bug that prevents syscall() from working for system
// calls with 6 arguments.
-#if (defined(XP_LINUX) && !defined(__alpha__)) || \
- (defined(__FreeBSD_kernel__) && defined(__GLIBC__))
+#if (defined(XP_LINUX) && !defined(__alpha__)) || \
+ (defined(__FreeBSD_kernel__) && defined(__GLIBC__))
#include <sys/syscall.h>
#if defined(SYS_mmap) || defined(SYS_mmap2)
-static inline
-void *_mmap(void *addr, size_t length, int prot, int flags,
- int fd, off_t offset)
+static inline void*
+_mmap(void* addr, size_t length, int prot, int flags, int fd, off_t offset)
{
// S390 only passes one argument to the mmap system call, which is a
// pointer to a structure containing the arguments.
#ifdef __s390__
- struct {
- void *addr;
- size_t length;
- long prot;
- long flags;
- long fd;
- off_t offset;
- } args = { addr, length, prot, flags, fd, offset };
- return (void *) syscall(SYS_mmap, &args);
+ struct
+ {
+ void* addr;
+ size_t length;
+ long prot;
+ long flags;
+ long fd;
+ off_t offset;
+ } args = { addr, length, prot, flags, fd, offset };
+ return (void*)syscall(SYS_mmap, &args);
#else
#if defined(ANDROID) && defined(__aarch64__) && defined(SYS_mmap2)
// Android NDK defines SYS_mmap2 for AArch64 despite it not supporting mmap2.
#undef SYS_mmap2
#endif
#ifdef SYS_mmap2
- return (void *) syscall(SYS_mmap2, addr, length, prot, flags,
- fd, offset >> 12);
+ return (void*)syscall(SYS_mmap2, addr, length, prot, flags, fd, offset >> 12);
#else
- return (void *) syscall(SYS_mmap, addr, length, prot, flags,
- fd, offset);
+ return (void*)syscall(SYS_mmap, addr, length, prot, flags, fd, offset);
#endif
#endif
}
#define mmap _mmap
#define munmap(a, l) syscall(SYS_munmap, a, l)
#endif
#endif
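A side note on the `offset >> 12` in the SYS_mmap2 branch above: mmap2 takes its file offset in 4096-byte units rather than bytes, and the shift performs that conversion. A tiny standalone sketch of the conversion (illustrative only, not from the patch itself):

#include <cassert>
#include <cstdint>

// mmap2's offset argument counts 4096-byte units; the byte offset must be
// page-aligned for the conversion to be exact.
static long
byte_offset_to_mmap2_units(int64_t byte_offset)
{
  assert((byte_offset & 0xfff) == 0);
  return static_cast<long>(byte_offset >> 12);
}

int
main()
{
  assert(byte_offset_to_mmap2_units(0) == 0);
  assert(byte_offset_to_mmap2_units(4096) == 1);
  assert(byte_offset_to_mmap2_units(1 << 20) == 256); // 1 MiB -> 256 units.
  return 0;
}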
// Size of stack-allocated buffer passed to strerror_r().
-#define STRERROR_BUF 64
+#define STRERROR_BUF 64
// Minimum alignment of non-tiny allocations is 2^QUANTUM_2POW_MIN bytes.
-# define QUANTUM_2POW_MIN 4
+#define QUANTUM_2POW_MIN 4
#if defined(_WIN64) || defined(__LP64__)
-# define SIZEOF_PTR_2POW 3
+#define SIZEOF_PTR_2POW 3
#else
-# define SIZEOF_PTR_2POW 2
+#define SIZEOF_PTR_2POW 2
#endif
-#define SIZEOF_PTR (1U << SIZEOF_PTR_2POW)
+#define SIZEOF_PTR (1U << SIZEOF_PTR_2POW)
#include "rb.h"
// sizeof(int) == (1U << SIZEOF_INT_2POW).
#ifndef SIZEOF_INT_2POW
-# define SIZEOF_INT_2POW 2
+#define SIZEOF_INT_2POW 2
#endif
// Size and alignment of memory chunks that are allocated by the OS's virtual
// memory system.
-#define CHUNK_2POW_DEFAULT 20
+#define CHUNK_2POW_DEFAULT 20
// Maximum number of dirty pages per arena.
-#define DIRTY_MAX_DEFAULT (1U << 8)
+#define DIRTY_MAX_DEFAULT (1U << 8)
// Maximum size of L1 cache line. This is used to avoid cache line aliasing,
// so over-estimates are okay (up to a point), but under-estimates will
// negatively affect performance.
-#define CACHELINE_2POW 6
-#define CACHELINE ((size_t)(1U << CACHELINE_2POW))
+#define CACHELINE_2POW 6
+#define CACHELINE ((size_t)(1U << CACHELINE_2POW))
// Smallest size class to support. On Windows the smallest allocation size
// must be 8 bytes on 32-bit, 16 bytes on 64-bit. On Linux and Mac, even
// malloc(1) must reserve a word's worth of memory (see Mozilla bug 691003).
#ifdef XP_WIN
-#define TINY_MIN_2POW (sizeof(void*) == 8 ? 4 : 3)
+#define TINY_MIN_2POW (sizeof(void*) == 8 ? 4 : 3)
#else
-#define TINY_MIN_2POW (sizeof(void*) == 8 ? 3 : 2)
+#define TINY_MIN_2POW (sizeof(void*) == 8 ? 3 : 2)
#endif
// Maximum size class that is a multiple of the quantum, but not (necessarily)
// a power of 2. Above this size, allocations are rounded up to the nearest
// power of 2.
-#define SMALL_MAX_2POW_DEFAULT 9
-#define SMALL_MAX_DEFAULT (1U << SMALL_MAX_2POW_DEFAULT)
+#define SMALL_MAX_2POW_DEFAULT 9
+#define SMALL_MAX_DEFAULT (1U << SMALL_MAX_2POW_DEFAULT)
// RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized
// as small as possible such that this setting is still honored, without
// violating other constraints. The goal is to make runs as small as possible
// without exceeding a per run external fragmentation threshold.
//
// We use binary fixed point math for overhead computations, where the binary
// point is implicitly RUN_BFP bits to the left.
//
// Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
// honored for some/all object sizes, since there is one bit of header overhead
// per object (plus a constant). This constraint is relaxed (ignored) for runs
// that are so small that the per-region overhead is greater than:
//
// (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)))
-#define RUN_BFP 12
+#define RUN_BFP 12
// \/ Implicit binary fixed point.
-#define RUN_MAX_OVRHD 0x0000003dU
-#define RUN_MAX_OVRHD_RELAX 0x00001800U
+#define RUN_MAX_OVRHD 0x0000003dU
+#define RUN_MAX_OVRHD_RELAX 0x00001800U
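To make the fixed-point comment above concrete: with RUN_BFP = 12 the binary point sits 12 bits to the left, so RUN_MAX_OVRHD = 0x3d = 61 means 61/4096, roughly 1.5% maximum header overhead per run. A standalone sketch (illustrative only, not the actual run-sizing loop) of how such a budget can be checked in pure integer math:

#include <cassert>
#include <cstddef>

#define RUN_BFP 12
#define RUN_MAX_OVRHD 0x0000003dU // 61/4096, roughly 1.5%.

// header/run <= 61/4096 is evaluated as (header << 12) <= 61 * run,
// i.e. the binary point is implicitly RUN_BFP bits to the left.
static bool
overhead_within_budget(size_t header_bytes, size_t run_bytes)
{
  return (header_bytes << RUN_BFP) <= RUN_MAX_OVRHD * run_bytes;
}

int
main()
{
  // 48 header bytes in a 4096-byte run: 48/4096 is about 1.2%, within budget.
  assert(overhead_within_budget(48, 4096));
  // 80 header bytes in a 4096-byte run: 80/4096 is about 2.0%, over budget.
  assert(!overhead_within_budget(80, 4096));
  return 0;
}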
// When MALLOC_STATIC_PAGESIZE is defined, the page size is fixed at
// compile-time for better performance, as opposed to determined at
// runtime. Some platforms can have different page sizes at runtime
// depending on kernel configuration, so they are opted out by default.
// Debug builds are opted out too, for test coverage.
#ifndef MOZ_DEBUG
-#if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) && !defined(__aarch64__)
+#if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) && \
+ !defined(__aarch64__)
#define MALLOC_STATIC_PAGESIZE 1
#endif
#endif
// Various quantum-related settings.
-#define QUANTUM_DEFAULT (size_t(1) << QUANTUM_2POW_MIN)
+#define QUANTUM_DEFAULT (size_t(1) << QUANTUM_2POW_MIN)
static const size_t quantum = QUANTUM_DEFAULT;
static const size_t quantum_mask = QUANTUM_DEFAULT - 1;
// Various bin-related settings.
static const size_t small_min = (QUANTUM_DEFAULT >> 1) + 1;
static const size_t small_max = size_t(SMALL_MAX_DEFAULT);
// Number of (2^n)-spaced tiny bins.
@@ -390,63 +389,64 @@ static const unsigned ntbins = unsigned(
// Number of quantum-spaced bins.
static const unsigned nqbins = unsigned(SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN);
#ifdef MALLOC_STATIC_PAGESIZE
// VM page size. It must divide the runtime CPU page size or the code
// will abort.
// Platform specific page size conditions copied from js/public/HeapAPI.h
-#if (defined(SOLARIS) || defined(__FreeBSD__)) && \
- (defined(__sparc) || defined(__sparcv9) || defined(__ia64))
+#if (defined(SOLARIS) || defined(__FreeBSD__)) && \
+ (defined(__sparc) || defined(__sparcv9) || defined(__ia64))
#define pagesize_2pow (size_t(13))
#elif defined(__powerpc64__)
#define pagesize_2pow (size_t(16))
#else
#define pagesize_2pow (size_t(12))
#endif
#define pagesize (size_t(1) << pagesize_2pow)
#define pagesize_mask (pagesize - 1)
// Max size class for bins.
static const size_t bin_maxclass = pagesize >> 1;
// Number of (2^n)-spaced sub-page bins.
-static const unsigned nsbins = unsigned(pagesize_2pow - SMALL_MAX_2POW_DEFAULT - 1);
+static const unsigned nsbins =
+ unsigned(pagesize_2pow - SMALL_MAX_2POW_DEFAULT - 1);
#else // !MALLOC_STATIC_PAGESIZE
// VM page size.
static size_t pagesize;
static size_t pagesize_mask;
static size_t pagesize_2pow;
// Various bin-related settings.
static size_t bin_maxclass; // Max size class for bins.
-static unsigned nsbins; // Number of (2^n)-spaced sub-page bins.
+static unsigned nsbins; // Number of (2^n)-spaced sub-page bins.
#endif
// Various chunk-related settings.
// Compute the header size such that it is large enough to contain the page map
// and enough nodes for the worst case: one node per non-header page plus one
// extra for situations where we briefly have one more node allocated than we
// will need.
-#define calculate_arena_header_size() \
- (sizeof(arena_chunk_t) + sizeof(arena_chunk_map_t) * (chunk_npages - 1))
-
-#define calculate_arena_header_pages() \
- ((calculate_arena_header_size() >> pagesize_2pow) + \
- ((calculate_arena_header_size() & pagesize_mask) ? 1 : 0))
+#define calculate_arena_header_size() \
+ (sizeof(arena_chunk_t) + sizeof(arena_chunk_map_t) * (chunk_npages - 1))
+
+#define calculate_arena_header_pages() \
+ ((calculate_arena_header_size() >> pagesize_2pow) + \
+ ((calculate_arena_header_size() & pagesize_mask) ? 1 : 0))
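A worked example of what these two macros compute, under assumed default constants (1 MiB chunks, 4 KiB pages) and illustrative stand-in sizes for the structs, since the real sizes depend on the target:

#include <cassert>
#include <cstddef>

static const size_t kAssumedChunkHeaderSize = 64; // stand-in for sizeof(arena_chunk_t)
static const size_t kAssumedMapEntrySize = 32;    // stand-in for sizeof(arena_chunk_map_t)
static const size_t kPagesize = 4096;
static const size_t kChunkNPages = (1 << 20) >> 12; // 256 pages per 1 MiB chunk.

int
main()
{
  // Same shape as calculate_arena_header_size(): one chunk header plus one
  // map entry per remaining page (the first entry is embedded in the struct).
  size_t header_size =
    kAssumedChunkHeaderSize + kAssumedMapEntrySize * (kChunkNPages - 1);
  // Same shape as calculate_arena_header_pages(): round up to whole pages.
  size_t header_pages =
    (header_size >> 12) + ((header_size & (kPagesize - 1)) ? 1 : 0);
  assert(header_size == 8224); // 64 + 32 * 255.
  assert(header_pages == 3);   // 8224 bytes round up to 3 pages of 4096.
  return 0;
}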
// Max size class for arenas.
-#define calculate_arena_maxclass() \
- (chunksize - (arena_chunk_header_npages << pagesize_2pow))
-
-#define CHUNKSIZE_DEFAULT ((size_t) 1 << CHUNK_2POW_DEFAULT)
+#define calculate_arena_maxclass() \
+ (chunksize - (arena_chunk_header_npages << pagesize_2pow))
+
+#define CHUNKSIZE_DEFAULT ((size_t)1 << CHUNK_2POW_DEFAULT)
static const size_t chunksize = CHUNKSIZE_DEFAULT;
static const size_t chunksize_mask = CHUNKSIZE_DEFAULT - 1;
#ifdef MALLOC_STATIC_PAGESIZE
static const size_t chunk_npages = CHUNKSIZE_DEFAULT >> pagesize_2pow;
#define arena_chunk_header_npages calculate_arena_header_pages()
#define arena_maxclass calculate_arena_maxclass()
#else
@@ -488,17 +488,17 @@ struct Mutex
inline void Lock();
inline void Unlock();
};
struct MOZ_RAII MutexAutoLock
{
explicit MutexAutoLock(Mutex& aMutex MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
- : mMutex(aMutex)
+ : mMutex(aMutex)
{
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
mMutex.Lock();
}
~MutexAutoLock() { mMutex.Unlock(); }
private:
@@ -517,61 +517,65 @@ static Mutex gInitLock = { OS_SPINLOCK_I
static Mutex gInitLock = { PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP };
#else
static Mutex gInitLock = { PTHREAD_MUTEX_INITIALIZER };
#endif
// ***************************************************************************
// Statistics data structures.
-struct malloc_bin_stats_t {
- // Current number of runs in this bin.
- unsigned long curruns;
+struct malloc_bin_stats_t
+{
+ // Current number of runs in this bin.
+ unsigned long curruns;
};
-struct arena_stats_t {
- // Number of bytes currently mapped.
- size_t mapped;
-
- // Current number of committed pages.
- size_t committed;
-
- // Per-size-category statistics.
- size_t allocated_small;
-
- size_t allocated_large;
+struct arena_stats_t
+{
+ // Number of bytes currently mapped.
+ size_t mapped;
+
+ // Current number of committed pages.
+ size_t committed;
+
+ // Per-size-category statistics.
+ size_t allocated_small;
+
+ size_t allocated_large;
};
// ***************************************************************************
// Extent data structures.
-enum ChunkType {
+enum ChunkType
+{
UNKNOWN_CHUNK,
ZEROED_CHUNK, // chunk only contains zeroes.
ARENA_CHUNK, // used to back arena runs created by arena_t::AllocRun.
HUGE_CHUNK, // used to back huge allocations (e.g. huge_malloc).
RECYCLED_CHUNK, // chunk has been stored for future use by chunk_recycle.
};
// Tree of extents.
-struct extent_node_t {
- // Linkage for the size/address-ordered tree.
- RedBlackTreeNode<extent_node_t> link_szad;
-
- // Linkage for the address-ordered tree.
- RedBlackTreeNode<extent_node_t> link_ad;
-
- // Pointer to the extent that this tree node is responsible for.
- void *addr;
-
- // Total region size.
- size_t size;
-
- // What type of chunk is there; used by chunk recycling code.
- ChunkType chunk_type;
+struct extent_node_t
+{
+ // Linkage for the size/address-ordered tree.
+ RedBlackTreeNode<extent_node_t> link_szad;
+
+ // Linkage for the address-ordered tree.
+ RedBlackTreeNode<extent_node_t> link_ad;
+
+ // Pointer to the extent that this tree node is responsible for.
+ void* addr;
+
+ // Total region size.
+ size_t size;
+
+ // What type of chunk is there; used by chunk recycling code.
+ ChunkType chunk_type;
};
template<typename T>
int
CompareAddr(T* aAddr1, T* aAddr2)
{
uintptr_t addr1 = reinterpret_cast<uintptr_t>(aAddr1);
uintptr_t addr2 = reinterpret_cast<uintptr_t>(aAddr2);
@@ -631,20 +635,21 @@ struct ExtentTreeBoundsTrait : public Ex
//
// An address is looked up by splitting it in kBitsPerLevel bit chunks, except
// the most significant bits, where the bit chunk is kBitsAtLevel1 which can be
// different if Bits is not a multiple of kBitsPerLevel.
//
// With e.g. sizeof(void*)=4, Bits=16 and kBitsPerLevel=8, an address is split
// like the following:
// 0x12345678 -> mRoot[0x12][0x34]
-template <size_t Bits>
-class AddressRadixTree {
- // Size of each radix tree node (as a power of 2).
- // This impacts tree depth.
+template<size_t Bits>
+class AddressRadixTree
+{
+// Size of each radix tree node (as a power of 2).
+// This impacts tree depth.
#if (SIZEOF_PTR == 4)
static const size_t kNodeSize2Pow = 14;
#else
static const size_t kNodeSize2Pow = CACHELINE_2POW;
#endif
static const size_t kBitsPerLevel = kNodeSize2Pow - SIZEOF_PTR_2POW;
static const size_t kBitsAtLevel1 =
(Bits % kBitsPerLevel) ? Bits % kBitsPerLevel : kBitsPerLevel;
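A standalone sketch of the key splitting described in the comment above (illustrative only): with a 32-bit pointer, Bits = 16 and kBitsPerLevel = 8, kBitsAtLevel1 is also 8, and the two subkeys extracted from 0x12345678 are exactly the 0x12 and 0x34 of the mRoot[0x12][0x34] example.

#include <cassert>
#include <cstdint>

// Reproduces the subkey computation used by AddressRadixTree<Bits>::GetSlot
// for the 32-bit, Bits = 16, kBitsPerLevel = 8 case (a 2-level tree).
static unsigned
subkey_at_level(uint32_t key, unsigned level)
{
  const unsigned kBitsPerLevel = 8;
  const unsigned kPtrBits = 32;
  unsigned lshift = level * kBitsPerLevel;
  return (key << lshift) >> (kPtrBits - kBitsPerLevel);
}

int
main()
{
  // Only the 16 most significant bits of the address participate.
  assert(subkey_at_level(0x12345678u, 0) == 0x12);
  assert(subkey_at_level(0x12345678u, 1) == 0x34);
  return 0;
}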
@@ -658,110 +663,110 @@ class AddressRadixTree {
public:
bool Init();
inline void* Get(void* aAddr);
// Returns whether the value was properly set.
inline bool Set(void* aAddr, void* aValue);
- inline bool Unset(void* aAddr)
- {
- return Set(aAddr, nullptr);
- }
+ inline bool Unset(void* aAddr) { return Set(aAddr, nullptr); }
private:
inline void** GetSlot(void* aAddr, bool aCreate = false);
};
// ***************************************************************************
// Arena data structures.
struct arena_t;
struct arena_bin_t;
// Each element of the chunk map corresponds to one page within the chunk.
-struct arena_chunk_map_t {
- // Linkage for run trees. There are two disjoint uses:
- //
- // 1) arena_t's tree or available runs.
- // 2) arena_run_t conceptually uses this linkage for in-use non-full
- // runs, rather than directly embedding linkage.
- RedBlackTreeNode<arena_chunk_map_t> link;
-
- // Run address (or size) and various flags are stored together. The bit
- // layout looks like (assuming 32-bit system):
- //
- // ???????? ???????? ????---- -mckdzla
- //
- // ? : Unallocated: Run address for first/last pages, unset for internal
- // pages.
- // Small: Run address.
- // Large: Run size for first page, unset for trailing pages.
- // - : Unused.
- // m : MADV_FREE/MADV_DONTNEED'ed?
- // c : decommitted?
- // k : key?
- // d : dirty?
- // z : zeroed?
- // l : large?
- // a : allocated?
- //
- // Following are example bit patterns for the three types of runs.
- //
- // r : run address
- // s : run size
- // x : don't care
- // - : 0
- // [cdzla] : bit set
- //
- // Unallocated:
- // ssssssss ssssssss ssss---- --c-----
- // xxxxxxxx xxxxxxxx xxxx---- ----d---
- // ssssssss ssssssss ssss---- -----z--
- //
- // Small:
- // rrrrrrrr rrrrrrrr rrrr---- -------a
- // rrrrrrrr rrrrrrrr rrrr---- -------a
- // rrrrrrrr rrrrrrrr rrrr---- -------a
- //
- // Large:
- // ssssssss ssssssss ssss---- ------la
- // -------- -------- -------- ------la
- // -------- -------- -------- ------la
- size_t bits;
+struct arena_chunk_map_t
+{
+ // Linkage for run trees. There are two disjoint uses:
+ //
+  // 1) arena_t's tree of available runs.
+ // 2) arena_run_t conceptually uses this linkage for in-use non-full
+ // runs, rather than directly embedding linkage.
+ RedBlackTreeNode<arena_chunk_map_t> link;
+
+ // Run address (or size) and various flags are stored together. The bit
+ // layout looks like (assuming 32-bit system):
+ //
+ // ???????? ???????? ????---- -mckdzla
+ //
+ // ? : Unallocated: Run address for first/last pages, unset for internal
+ // pages.
+ // Small: Run address.
+ // Large: Run size for first page, unset for trailing pages.
+ // - : Unused.
+ // m : MADV_FREE/MADV_DONTNEED'ed?
+ // c : decommitted?
+ // k : key?
+ // d : dirty?
+ // z : zeroed?
+ // l : large?
+ // a : allocated?
+ //
+ // Following are example bit patterns for the three types of runs.
+ //
+ // r : run address
+ // s : run size
+ // x : don't care
+ // - : 0
+ // [cdzla] : bit set
+ //
+ // Unallocated:
+ // ssssssss ssssssss ssss---- --c-----
+ // xxxxxxxx xxxxxxxx xxxx---- ----d---
+ // ssssssss ssssssss ssss---- -----z--
+ //
+ // Small:
+ // rrrrrrrr rrrrrrrr rrrr---- -------a
+ // rrrrrrrr rrrrrrrr rrrr---- -------a
+ // rrrrrrrr rrrrrrrr rrrr---- -------a
+ //
+ // Large:
+ // ssssssss ssssssss ssss---- ------la
+ // -------- -------- -------- ------la
+ // -------- -------- -------- ------la
+ size_t bits;
// Note that CHUNK_MAP_DECOMMITTED's meaning varies depending on whether
// MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are defined.
//
// If MALLOC_DECOMMIT is defined, a page which is CHUNK_MAP_DECOMMITTED must be
// re-committed with pages_commit() before it may be touched. If
// MALLOC_DECOMMIT is defined, MALLOC_DOUBLE_PURGE may not be defined.
//
// If neither MALLOC_DECOMMIT nor MALLOC_DOUBLE_PURGE is defined, pages which
// are madvised (with either MADV_DONTNEED or MADV_FREE) are marked with
// CHUNK_MAP_MADVISED.
//
// Otherwise, if MALLOC_DECOMMIT is not defined and MALLOC_DOUBLE_PURGE is
// defined, then a page which is madvised is marked as CHUNK_MAP_MADVISED.
// When it's finally freed with jemalloc_purge_freed_pages, the page is marked
// as CHUNK_MAP_DECOMMITTED.
-#define CHUNK_MAP_MADVISED ((size_t)0x40U)
-#define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
-#define CHUNK_MAP_MADVISED_OR_DECOMMITTED (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
-#define CHUNK_MAP_KEY ((size_t)0x10U)
-#define CHUNK_MAP_DIRTY ((size_t)0x08U)
-#define CHUNK_MAP_ZEROED ((size_t)0x04U)
-#define CHUNK_MAP_LARGE ((size_t)0x02U)
-#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
+#define CHUNK_MAP_MADVISED ((size_t)0x40U)
+#define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
+#define CHUNK_MAP_MADVISED_OR_DECOMMITTED \
+ (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
+#define CHUNK_MAP_KEY ((size_t)0x10U)
+#define CHUNK_MAP_DIRTY ((size_t)0x08U)
+#define CHUNK_MAP_ZEROED ((size_t)0x04U)
+#define CHUNK_MAP_LARGE ((size_t)0x02U)
+#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
};
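A standalone sketch of how the documented bit layout packs a large run's size together with its flags, and how the size is recovered by masking off the low flag bits (illustrative only; it assumes a 4 KiB page size, whereas the real code uses pagesize_mask):

#include <cassert>
#include <cstddef>

static const size_t kAssumedPagesizeMask = 4096 - 1;

#define CHUNK_MAP_LARGE ((size_t)0x02U)
#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)

int
main()
{
  // First page of an allocated large run of 5 pages (20480 bytes): the size
  // lives in the page-aligned high bits, the flags in the low bits.
  size_t run_size = 5 * 4096;
  size_t bits = run_size | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

  // Recover size and flags the way the arena code does (bits & ~pagesize_mask).
  assert((bits & ~kAssumedPagesizeMask) == run_size);
  assert((bits & CHUNK_MAP_LARGE) != 0);
  assert((bits & CHUNK_MAP_ALLOCATED) != 0);
  return 0;
}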
struct ArenaChunkMapLink
{
- static RedBlackTreeNode<arena_chunk_map_t>& GetTreeNode(arena_chunk_map_t* aThis)
+ static RedBlackTreeNode<arena_chunk_map_t>& GetTreeNode(
+ arena_chunk_map_t* aThis)
{
return aThis->link;
}
};
struct ArenaRunTreeTrait : public ArenaChunkMapLink
{
static inline int Compare(arena_chunk_map_t* aNode, arena_chunk_map_t* aOther)
@@ -774,43 +779,46 @@ struct ArenaRunTreeTrait : public ArenaC
struct ArenaAvailTreeTrait : public ArenaChunkMapLink
{
static inline int Compare(arena_chunk_map_t* aNode, arena_chunk_map_t* aOther)
{
size_t size1 = aNode->bits & ~pagesize_mask;
size_t size2 = aOther->bits & ~pagesize_mask;
int ret = (size1 > size2) - (size1 < size2);
- return ret ? ret : CompareAddr((aNode->bits & CHUNK_MAP_KEY) ? nullptr : aNode, aOther);
+ return ret ? ret
+ : CompareAddr((aNode->bits & CHUNK_MAP_KEY) ? nullptr : aNode,
+ aOther);
}
};
// Arena chunk header.
-struct arena_chunk_t {
- // Arena that owns the chunk.
- arena_t *arena;
-
- // Linkage for the arena's tree of dirty chunks.
- RedBlackTreeNode<arena_chunk_t> link_dirty;
+struct arena_chunk_t
+{
+ // Arena that owns the chunk.
+ arena_t* arena;
+
+ // Linkage for the arena's tree of dirty chunks.
+ RedBlackTreeNode<arena_chunk_t> link_dirty;
#ifdef MALLOC_DOUBLE_PURGE
- // If we're double-purging, we maintain a linked list of chunks which
- // have pages which have been madvise(MADV_FREE)'d but not explicitly
- // purged.
- //
- // We're currently lazy and don't remove a chunk from this list when
- // all its madvised pages are recommitted.
- mozilla::DoublyLinkedListElement<arena_chunk_t> chunks_madvised_elem;
+ // If we're double-purging, we maintain a linked list of chunks which
+ // have pages which have been madvise(MADV_FREE)'d but not explicitly
+ // purged.
+ //
+ // We're currently lazy and don't remove a chunk from this list when
+ // all its madvised pages are recommitted.
+ mozilla::DoublyLinkedListElement<arena_chunk_t> chunks_madvised_elem;
#endif
- // Number of dirty pages.
- size_t ndirty;
-
- // Map of pages within chunk that keeps track of free/large/small.
- arena_chunk_map_t map[1]; // Dynamically sized.
+ // Number of dirty pages.
+ size_t ndirty;
+
+ // Map of pages within chunk that keeps track of free/large/small.
+ arena_chunk_map_t map[1]; // Dynamically sized.
};
struct ArenaDirtyChunkTrait
{
static RedBlackTreeNode<arena_chunk_t>& GetTreeNode(arena_chunk_t* aThis)
{
return aThis->link_dirty;
}
@@ -829,74 +837,76 @@ namespace mozilla {
template<>
struct GetDoublyLinkedListElement<arena_chunk_t>
{
static DoublyLinkedListElement<arena_chunk_t>& Get(arena_chunk_t* aThis)
{
return aThis->chunks_madvised_elem;
}
};
-
}
#endif
-struct arena_run_t {
+struct arena_run_t
+{
#if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
- uint32_t magic;
-# define ARENA_RUN_MAGIC 0x384adf93
+ uint32_t magic;
+#define ARENA_RUN_MAGIC 0x384adf93
#endif
- // Bin this run is associated with.
- arena_bin_t *bin;
-
- // Index of first element that might have a free region.
- unsigned regs_minelm;
-
- // Number of free regions in run.
- unsigned nfree;
-
- // Bitmask of in-use regions (0: in use, 1: free).
- unsigned regs_mask[1]; // Dynamically sized.
+ // Bin this run is associated with.
+ arena_bin_t* bin;
+
+ // Index of first element that might have a free region.
+ unsigned regs_minelm;
+
+ // Number of free regions in run.
+ unsigned nfree;
+
+ // Bitmask of in-use regions (0: in use, 1: free).
+ unsigned regs_mask[1]; // Dynamically sized.
};
-struct arena_bin_t {
- // Current run being used to service allocations of this bin's size
- // class.
- arena_run_t *runcur;
-
- // Tree of non-full runs. This tree is used when looking for an
- // existing run when runcur is no longer usable. We choose the
- // non-full run that is lowest in memory; this policy tends to keep
- // objects packed well, and it can also help reduce the number of
- // almost-empty chunks.
- RedBlackTree<arena_chunk_map_t, ArenaRunTreeTrait> runs;
-
- // Size of regions in a run for this bin's size class.
- size_t reg_size;
-
- // Total size of a run for this bin's size class.
- size_t run_size;
-
- // Total number of regions in a run for this bin's size class.
- uint32_t nregs;
-
- // Number of elements in a run's regs_mask for this bin's size class.
- uint32_t regs_mask_nelms;
-
- // Offset of first region in a run for this bin's size class.
- uint32_t reg0_offset;
-
- // Bin statistics.
- malloc_bin_stats_t stats;
+struct arena_bin_t
+{
+ // Current run being used to service allocations of this bin's size
+ // class.
+ arena_run_t* runcur;
+
+ // Tree of non-full runs. This tree is used when looking for an
+ // existing run when runcur is no longer usable. We choose the
+ // non-full run that is lowest in memory; this policy tends to keep
+ // objects packed well, and it can also help reduce the number of
+ // almost-empty chunks.
+ RedBlackTree<arena_chunk_map_t, ArenaRunTreeTrait> runs;
+
+ // Size of regions in a run for this bin's size class.
+ size_t reg_size;
+
+ // Total size of a run for this bin's size class.
+ size_t run_size;
+
+ // Total number of regions in a run for this bin's size class.
+ uint32_t nregs;
+
+ // Number of elements in a run's regs_mask for this bin's size class.
+ uint32_t regs_mask_nelms;
+
+ // Offset of first region in a run for this bin's size class.
+ uint32_t reg0_offset;
+
+ // Bin statistics.
+ malloc_bin_stats_t stats;
};
-struct arena_t {
+struct arena_t
+{
#if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
uint32_t mMagic;
-# define ARENA_MAGIC 0x947d3d24
+#define ARENA_MAGIC 0x947d3d24
#endif
arena_id_t mId;
// Linkage for the tree of arenas by id.
RedBlackTreeNode<arena_t> mLink;
// All operations on this arena require that lock be locked.
Mutex mLock;
@@ -966,48 +976,69 @@ public:
static inline arena_t* GetById(arena_id_t aArenaId);
private:
void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
void DeallocChunk(arena_chunk_t* aChunk);
- arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);
+ arena_run_t* AllocRun(arena_bin_t* aBin,
+ size_t aSize,
+ bool aLarge,
+ bool aZero);
void DallocRun(arena_run_t* aRun, bool aDirty);
- MOZ_MUST_USE bool SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero);
-
- void TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize);
-
- void TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize, bool dirty);
+ MOZ_MUST_USE bool SplitRun(arena_run_t* aRun,
+ size_t aSize,
+ bool aLarge,
+ bool aZero);
+
+ void TrimRunHead(arena_chunk_t* aChunk,
+ arena_run_t* aRun,
+ size_t aOldSize,
+ size_t aNewSize);
+
+ void TrimRunTail(arena_chunk_t* aChunk,
+ arena_run_t* aRun,
+ size_t aOldSize,
+ size_t aNewSize,
+ bool dirty);
inline void* MallocBinEasy(arena_bin_t* aBin, arena_run_t* aRun);
void* MallocBinHard(arena_bin_t* aBin);
arena_run_t* GetNonFullBinRun(arena_bin_t* aBin);
inline void* MallocSmall(size_t aSize, bool aZero);
void* MallocLarge(size_t aSize, bool aZero);
public:
inline void* Malloc(size_t aSize, bool aZero);
void* Palloc(size_t aAlignment, size_t aSize, size_t aAllocSize);
- inline void DallocSmall(arena_chunk_t* aChunk, void* aPtr, arena_chunk_map_t *aMapElm);
+ inline void DallocSmall(arena_chunk_t* aChunk,
+ void* aPtr,
+ arena_chunk_map_t* aMapElm);
void DallocLarge(arena_chunk_t* aChunk, void* aPtr);
- void RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize, size_t aOldSize);
-
- bool RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize, size_t aOldSize);
+ void RallocShrinkLarge(arena_chunk_t* aChunk,
+ void* aPtr,
+ size_t aSize,
+ size_t aOldSize);
+
+ bool RallocGrowLarge(arena_chunk_t* aChunk,
+ void* aPtr,
+ size_t aSize,
+ size_t aOldSize);
void Purge(bool aAll);
void HardPurge();
};
struct ArenaTreeTrait
{
@@ -1040,34 +1071,34 @@ static RedBlackTree<extent_node_t, Exten
// Protects huge allocation-related data structures.
static Mutex huge_mtx;
// Tree of chunks that are stand-alone huge allocations.
static RedBlackTree<extent_node_t, ExtentTreeTrait> huge;
// Huge allocation statistics.
-static size_t huge_allocated;
-static size_t huge_mapped;
+static size_t huge_allocated;
+static size_t huge_mapped;
// **************************
// base (internal allocation).
// Current pages that are being used for internal memory allocations. These
// pages are carved up in cacheline-size quanta, so that there is no chance of
// false cache line sharing.
-static void *base_pages;
-static void *base_next_addr;
-static void *base_next_decommitted;
-static void *base_past_addr; // Addr immediately past base_pages.
-static extent_node_t *base_nodes;
+static void* base_pages;
+static void* base_next_addr;
+static void* base_next_decommitted;
+static void* base_past_addr; // Addr immediately past base_pages.
+static extent_node_t* base_nodes;
static Mutex base_mtx;
-static size_t base_mapped;
-static size_t base_committed;
+static size_t base_mapped;
+static size_t base_committed;
// ******
// Arenas.
// A tree of all available arenas, arranged by id.
// TODO: Move into arena_t as a static member when rb_tree doesn't depend on
// the type being defined anymore.
static RedBlackTree<arena_t, ArenaTreeTrait> gArenaTree;
@@ -1076,100 +1107,117 @@ static Mutex arenas_lock; // Protects ar
// The arena associated with the current thread (per jemalloc_thread_local_arena)
// On OSX, __thread/thread_local ends up calling malloc to allocate storage
// on first access on each thread, which leads to an infinite loop, but
// pthread-based TLS somehow doesn't have this problem.
#if !defined(XP_DARWIN)
static MOZ_THREAD_LOCAL(arena_t*) thread_arena;
#else
-static mozilla::detail::ThreadLocal<arena_t*, mozilla::detail::ThreadLocalKeyStorage> thread_arena;
+static mozilla::detail::ThreadLocal<arena_t*,
+ mozilla::detail::ThreadLocalKeyStorage>
+ thread_arena;
#endif
// The main arena, which all threads default to until jemalloc_thread_local_arena
// is called.
-static arena_t *gMainArena;
+static arena_t* gMainArena;
// *****************************
// Runtime configuration options.
const uint8_t kAllocJunk = 0xe4;
const uint8_t kAllocPoison = 0xe5;
#ifdef MOZ_DEBUG
-static bool opt_junk = true;
-static bool opt_zero = false;
+static bool opt_junk = true;
+static bool opt_zero = false;
#else
-static const bool opt_junk = false;
-static const bool opt_zero = false;
+static const bool opt_junk = false;
+static const bool opt_zero = false;
#endif
-static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
+static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
// ***************************************************************************
// Begin forward declarations.
-static void* chunk_alloc(size_t aSize, size_t aAlignment, bool aBase, bool* aZeroed = nullptr);
-static void chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType);
-static void chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed);
-static arena_t *arenas_extend();
-static void *huge_malloc(size_t size, bool zero);
-static void* huge_palloc(size_t aSize, size_t aAlignment, bool aZero);
-static void* huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize);
-static void huge_dalloc(void* aPtr);
+static void*
+chunk_alloc(size_t aSize,
+ size_t aAlignment,
+ bool aBase,
+ bool* aZeroed = nullptr);
+static void
+chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType);
+static void
+chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed);
+static arena_t*
+arenas_extend();
+static void*
+huge_malloc(size_t size, bool zero);
+static void*
+huge_palloc(size_t aSize, size_t aAlignment, bool aZero);
+static void*
+huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize);
+static void
+huge_dalloc(void* aPtr);
#ifdef XP_WIN
extern "C"
#else
static
#endif
-bool malloc_init_hard(void);
+ bool
+ malloc_init_hard(void);
#ifdef XP_DARWIN
#define FORK_HOOK extern "C"
#else
#define FORK_HOOK static
#endif
-FORK_HOOK void _malloc_prefork(void);
-FORK_HOOK void _malloc_postfork_parent(void);
-FORK_HOOK void _malloc_postfork_child(void);
+FORK_HOOK void
+_malloc_prefork(void);
+FORK_HOOK void
+_malloc_postfork_parent(void);
+FORK_HOOK void
+_malloc_postfork_child(void);
// End forward declarations.
// ***************************************************************************
static void
-_malloc_message(const char *p)
+_malloc_message(const char* p)
{
#if !defined(XP_WIN)
-#define _write write
+#define _write write
#endif
// Pretend to check _write() errors to suppress gcc warnings about
// warn_unused_result annotations in some versions of glibc headers.
- if (_write(STDERR_FILENO, p, (unsigned int) strlen(p)) < 0) {
+ if (_write(STDERR_FILENO, p, (unsigned int)strlen(p)) < 0) {
return;
}
}
-template <typename... Args>
+template<typename... Args>
static void
-_malloc_message(const char *p, Args... args)
+_malloc_message(const char* p, Args... args)
{
_malloc_message(p);
_malloc_message(args...);
}
#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/TaggedAnonymousMemory.h"
-// Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
-// instead of the one defined here; use only MozTagAnonymousMemory().
+ // Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
+ // instead of the one defined here; use only MozTagAnonymousMemory().
#ifdef ANDROID
// Android's pthread.h does not declare pthread_atfork() until SDK 21.
-extern "C" MOZ_EXPORT
-int pthread_atfork(void (*)(void), void (*)(void), void(*)(void));
+extern "C" MOZ_EXPORT int
+pthread_atfork(void (*)(void), void (*)(void), void (*)(void));
#endif
// ***************************************************************************
// Begin mutex. We can't use normal pthread mutexes in all places, because
// they require malloc()ed memory, which causes bootstrapping issues in some
// cases. We also can't use constructors, because for statics, they would fire
// after the first use of malloc, resetting the locks.
@@ -1240,54 +1288,50 @@ GetChunkForPtr(const void* aPtr)
// Return the chunk offset of address a.
static inline size_t
GetChunkOffsetForPtr(const void* aPtr)
{
return (size_t)(uintptr_t(aPtr) & chunksize_mask);
}
// Return the smallest chunk multiple that is >= s.
-#define CHUNK_CEILING(s) \
- (((s) + chunksize_mask) & ~chunksize_mask)
+#define CHUNK_CEILING(s) (((s) + chunksize_mask) & ~chunksize_mask)
// Return the smallest cacheline multiple that is >= s.
-#define CACHELINE_CEILING(s) \
- (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
+#define CACHELINE_CEILING(s) (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
// Return the smallest quantum multiple that is >= a.
-#define QUANTUM_CEILING(a) \
- (((a) + quantum_mask) & ~quantum_mask)
+#define QUANTUM_CEILING(a) (((a) + quantum_mask) & ~quantum_mask)
// Return the smallest pagesize multiple that is >= s.
-#define PAGE_CEILING(s) \
- (((s) + pagesize_mask) & ~pagesize_mask)
+#define PAGE_CEILING(s) (((s) + pagesize_mask) & ~pagesize_mask)
// Compute the smallest power of 2 that is >= x.
static inline size_t
pow2_ceil(size_t x)
{
- x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
+ x--;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
#if (SIZEOF_PTR == 8)
- x |= x >> 32;
+ x |= x >> 32;
#endif
- x++;
- return x;
+ x++;
+ return x;
}
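Two bit tricks sit back to back here: the *_CEILING macros round up to a multiple of a power of two with (s + mask) & ~mask, and pow2_ceil() smears the highest set bit downward so that the final increment carries up to the next power of two. A standalone sketch with spot checks (illustrative only):

#include <cassert>
#include <cstddef>

// Round s up to the next multiple of a power-of-two alignment, the same
// shape as PAGE_CEILING/QUANTUM_CEILING above.
static size_t
ceil_to(size_t s, size_t align)
{
  size_t mask = align - 1;
  return (s + mask) & ~mask;
}

// Smallest power of 2 that is >= x; same shape as pow2_ceil above
// (32-bit variant for brevity).
static size_t
pow2_ceil_ref(size_t x)
{
  x--;         // So exact powers of two map to themselves.
  x |= x >> 1; // After the shifts, every bit below the highest
  x |= x >> 2; // set bit is also set...
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  x++;         // ...so adding 1 carries into the next power of two.
  return x;
}

int
main()
{
  assert(ceil_to(1, 4096) == 4096);
  assert(ceil_to(4096, 4096) == 4096); // Already aligned: unchanged.
  assert(ceil_to(4097, 4096) == 8192);

  assert(pow2_ceil_ref(1) == 1);
  assert(pow2_ceil_ref(513) == 1024);
  assert(pow2_ceil_ref(1024) == 1024); // Exact power of two: unchanged.
  return 0;
}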
-static inline const char *
+static inline const char*
_getprogname(void)
{
- return "<jemalloc>";
+ return "<jemalloc>";
}
// ***************************************************************************
static inline void
pages_decommit(void* aAddr, size_t aSize)
{
#ifdef XP_WIN
@@ -1300,17 +1344,18 @@ pages_decommit(void* aAddr, size_t aSize
if (!VirtualFree(aAddr, pages_size, MEM_DECOMMIT)) {
MOZ_CRASH();
}
aAddr = (void*)((uintptr_t)aAddr + pages_size);
aSize -= pages_size;
pages_size = std::min(aSize, chunksize);
}
#else
- if (mmap(aAddr, aSize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) ==
+ if (mmap(
+ aAddr, aSize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) ==
MAP_FAILED) {
MOZ_CRASH();
}
MozTagAnonymousMemory(aAddr, aSize, "jemalloc-decommitted");
#endif
}
// Commit pages. Returns whether pages were committed.
@@ -1343,40 +1388,40 @@ pages_commit(void* aAddr, size_t aSize)
MozTagAnonymousMemory(aAddr, aSize, "jemalloc");
#endif
return true;
}
static bool
base_pages_alloc(size_t minsize)
{
- size_t csize;
- size_t pminsize;
-
- MOZ_ASSERT(minsize != 0);
- csize = CHUNK_CEILING(minsize);
- base_pages = chunk_alloc(csize, chunksize, true);
- if (!base_pages) {
- return true;
- }
- base_next_addr = base_pages;
- base_past_addr = (void *)((uintptr_t)base_pages + csize);
- // Leave enough pages for minsize committed, since otherwise they would
- // have to be immediately recommitted.
- pminsize = PAGE_CEILING(minsize);
- base_next_decommitted = (void *)((uintptr_t)base_pages + pminsize);
-# if defined(MALLOC_DECOMMIT)
- if (pminsize < csize) {
- pages_decommit(base_next_decommitted, csize - pminsize);
- }
-# endif
- base_mapped += csize;
- base_committed += pminsize;
-
- return false;
+ size_t csize;
+ size_t pminsize;
+
+ MOZ_ASSERT(minsize != 0);
+ csize = CHUNK_CEILING(minsize);
+ base_pages = chunk_alloc(csize, chunksize, true);
+ if (!base_pages) {
+ return true;
+ }
+ base_next_addr = base_pages;
+ base_past_addr = (void*)((uintptr_t)base_pages + csize);
+ // Leave enough pages for minsize committed, since otherwise they would
+ // have to be immediately recommitted.
+ pminsize = PAGE_CEILING(minsize);
+ base_next_decommitted = (void*)((uintptr_t)base_pages + pminsize);
+#if defined(MALLOC_DECOMMIT)
+ if (pminsize < csize) {
+ pages_decommit(base_next_decommitted, csize - pminsize);
+ }
+#endif
+ base_mapped += csize;
+ base_committed += pminsize;
+
+ return false;
}
static void*
base_alloc(size_t aSize)
{
void* ret;
size_t csize;
@@ -1392,57 +1437,57 @@ base_alloc(size_t aSize)
}
// Allocate.
ret = base_next_addr;
base_next_addr = (void*)((uintptr_t)base_next_addr + csize);
// Make sure enough pages are committed for the new allocation.
if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
void* pbase_next_addr = (void*)(PAGE_CEILING((uintptr_t)base_next_addr));
-# ifdef MALLOC_DECOMMIT
+#ifdef MALLOC_DECOMMIT
if (!pages_commit(base_next_decommitted,
(uintptr_t)pbase_next_addr -
(uintptr_t)base_next_decommitted)) {
return nullptr;
}
-# endif
+#endif
base_next_decommitted = pbase_next_addr;
- base_committed += (uintptr_t)pbase_next_addr -
- (uintptr_t)base_next_decommitted;
+ base_committed +=
+ (uintptr_t)pbase_next_addr - (uintptr_t)base_next_decommitted;
}
return ret;
}
static void*
base_calloc(size_t aNumber, size_t aSize)
{
void* ret = base_alloc(aNumber * aSize);
if (ret) {
memset(ret, 0, aNumber * aSize);
}
return ret;
}
-static extent_node_t *
+static extent_node_t*
base_node_alloc(void)
{
- extent_node_t *ret;
-
- base_mtx.Lock();
- if (base_nodes) {
- ret = base_nodes;
- base_nodes = *(extent_node_t **)ret;
- base_mtx.Unlock();
- } else {
- base_mtx.Unlock();
- ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
- }
-
- return ret;
+ extent_node_t* ret;
+
+ base_mtx.Lock();
+ if (base_nodes) {
+ ret = base_nodes;
+ base_nodes = *(extent_node_t**)ret;
+ base_mtx.Unlock();
+ } else {
+ base_mtx.Unlock();
+ ret = (extent_node_t*)base_alloc(sizeof(extent_node_t));
+ }
+
+ return ret;
}
static void
base_node_dealloc(extent_node_t* aNode)
{
MutexAutoLock lock(base_mtx);
*(extent_node_t**)aNode = base_nodes;
base_nodes = aNode;
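The base_node_alloc/base_node_dealloc pair above keeps its free list without any extra memory: a freed extent_node_t's own first word is reused to store the next-free pointer. A minimal standalone sketch of that intrusive free-list trick (illustrative only, with a simplified node type and no locking):

#include <cassert>
#include <cstddef>

// Simplified stand-in for extent_node_t: anything at least pointer-sized.
struct Node
{
  void* payload[4];
};

static Node* free_nodes = nullptr; // Head of the free list.

// Push a no-longer-needed node; its first word becomes the "next" link.
static void
node_dealloc(Node* aNode)
{
  *(Node**)aNode = free_nodes;
  free_nodes = aNode;
}

// Pop a recycled node if one is available.
static Node*
node_alloc_recycled()
{
  Node* node = free_nodes;
  if (node) {
    free_nodes = *(Node**)node;
  }
  return node;
}

int
main()
{
  Node a, b;
  node_dealloc(&a);
  node_dealloc(&b);
  assert(node_alloc_recycled() == &b); // LIFO: last freed comes back first.
  assert(node_alloc_recycled() == &a);
  assert(node_alloc_recycled() == nullptr); // Free list exhausted.
  return 0;
}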
@@ -1456,190 +1501,195 @@ struct BaseNodeFreePolicy
using UniqueBaseNode = mozilla::UniquePtr<extent_node_t, BaseNodeFreePolicy>;
// End Utility functions/macros.
// ***************************************************************************
// Begin chunk management functions.
#ifdef XP_WIN
-static void *
-pages_map(void *aAddr, size_t aSize)
+static void*
+pages_map(void* aAddr, size_t aSize)
{
- void *ret = nullptr;
- ret = VirtualAlloc(aAddr, aSize, MEM_COMMIT | MEM_RESERVE,
- PAGE_READWRITE);
- return ret;
+ void* ret = nullptr;
+ ret = VirtualAlloc(aAddr, aSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+ return ret;
}
static void
-pages_unmap(void *aAddr, size_t aSize)
+pages_unmap(void* aAddr, size_t aSize)
{
- if (VirtualFree(aAddr, 0, MEM_RELEASE) == 0) {
- _malloc_message(_getprogname(),
- ": (malloc) Error in VirtualFree()\n");
- }
+ if (VirtualFree(aAddr, 0, MEM_RELEASE) == 0) {
+ _malloc_message(_getprogname(), ": (malloc) Error in VirtualFree()\n");
+ }
}
#else
static void
-pages_unmap(void *aAddr, size_t aSize)
+pages_unmap(void* aAddr, size_t aSize)
{
- if (munmap(aAddr, aSize) == -1) {
- char buf[STRERROR_BUF];
-
- if (strerror_r(errno, buf, sizeof(buf)) == 0) {
- _malloc_message(_getprogname(),
- ": (malloc) Error in munmap(): ", buf, "\n");
- }
- }
+ if (munmap(aAddr, aSize) == -1) {
+ char buf[STRERROR_BUF];
+
+ if (strerror_r(errno, buf, sizeof(buf)) == 0) {
+ _malloc_message(
+ _getprogname(), ": (malloc) Error in munmap(): ", buf, "\n");
+ }
+ }
}
-static void *
-pages_map(void *aAddr, size_t aSize)
+static void*
+pages_map(void* aAddr, size_t aSize)
{
- void *ret;
-#if defined(__ia64__) || (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
- // The JS engine assumes that all allocated pointers have their high 17 bits clear,
- // which ia64's mmap doesn't support directly. However, we can emulate it by passing
- // mmap an "addr" parameter with those bits clear. The mmap will return that address,
- // or the nearest available memory above that address, providing a near-guarantee
- // that those bits are clear. If they are not, we return nullptr below to indicate
- // out-of-memory.
- //
- // The addr is chosen as 0x0000070000000000, which still allows about 120TB of virtual
- // address space.
- //
- // See Bug 589735 for more information.
- bool check_placement = true;
- if (!aAddr) {
- aAddr = (void*)0x0000070000000000;
- check_placement = false;
- }
+ void* ret;
+#if defined(__ia64__) || \
+ (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
+ // The JS engine assumes that all allocated pointers have their high 17 bits clear,
+ // which ia64's mmap doesn't support directly. However, we can emulate it by passing
+ // mmap an "addr" parameter with those bits clear. The mmap will return that address,
+ // or the nearest available memory above that address, providing a near-guarantee
+ // that those bits are clear. If they are not, we return nullptr below to indicate
+ // out-of-memory.
+ //
+ // The addr is chosen as 0x0000070000000000, which still allows about 120TB of virtual
+ // address space.
+ //
+ // See Bug 589735 for more information.
+ bool check_placement = true;
+ if (!aAddr) {
+ aAddr = (void*)0x0000070000000000;
+ check_placement = false;
+ }
#endif
#if defined(__sparc__) && defined(__arch64__) && defined(__linux__)
- const uintptr_t start = 0x0000070000000000ULL;
- const uintptr_t end = 0x0000800000000000ULL;
-
- // Copied from js/src/gc/Memory.cpp and adapted for this source
- uintptr_t hint;
- void* region = MAP_FAILED;
- for (hint = start; region == MAP_FAILED && hint + aSize <= end; hint += chunksize) {
- region = mmap((void*)hint, aSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
- if (region != MAP_FAILED) {
- if (((size_t) region + (aSize - 1)) & 0xffff800000000000) {
- if (munmap(region, aSize)) {
- MOZ_ASSERT(errno == ENOMEM);
- }
- region = MAP_FAILED;
- }
- }
+ const uintptr_t start = 0x0000070000000000ULL;
+ const uintptr_t end = 0x0000800000000000ULL;
+
+ // Copied from js/src/gc/Memory.cpp and adapted for this source
+ uintptr_t hint;
+ void* region = MAP_FAILED;
+ for (hint = start; region == MAP_FAILED && hint + aSize <= end;
+ hint += chunksize) {
+ region = mmap((void*)hint,
+ aSize,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON,
+ -1,
+ 0);
+ if (region != MAP_FAILED) {
+ if (((size_t)region + (aSize - 1)) & 0xffff800000000000) {
+ if (munmap(region, aSize)) {
+ MOZ_ASSERT(errno == ENOMEM);
+ }
+ region = MAP_FAILED;
+ }
}
- ret = region;
+ }
+ ret = region;
#else
- // We don't use MAP_FIXED here, because it can cause the *replacement*
- // of existing mappings, and we only want to create new mappings.
- ret = mmap(aAddr, aSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON, -1, 0);
- MOZ_ASSERT(ret);
+ // We don't use MAP_FIXED here, because it can cause the *replacement*
+ // of existing mappings, and we only want to create new mappings.
+ ret =
+ mmap(aAddr, aSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ MOZ_ASSERT(ret);
#endif
- if (ret == MAP_FAILED) {
- ret = nullptr;
- }
-#if defined(__ia64__) || (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
- // If the allocated memory doesn't have its upper 17 bits clear, consider it
- // as out of memory.
- else if ((long long)ret & 0xffff800000000000) {
- munmap(ret, aSize);
- ret = nullptr;
- }
- // If the caller requested a specific memory location, verify that's what mmap returned.
- else if (check_placement && ret != aAddr) {
+ if (ret == MAP_FAILED) {
+ ret = nullptr;
+ }
+#if defined(__ia64__) || \
+ (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
+ // If the allocated memory doesn't have its upper 17 bits clear, consider it
+ // as out of memory.
+ else if ((long long)ret & 0xffff800000000000) {
+ munmap(ret, aSize);
+ ret = nullptr;
+ }
+ // If the caller requested a specific memory location, verify that's what mmap returned.
+ else if (check_placement && ret != aAddr) {
#else
- else if (aAddr && ret != aAddr) {
+ else if (aAddr && ret != aAddr) {
#endif
- // We succeeded in mapping memory, but not in the right place.
- pages_unmap(ret, aSize);
- ret = nullptr;
- }
- if (ret) {
- MozTagAnonymousMemory(ret, aSize, "jemalloc");
- }
-
-#if defined(__ia64__) || (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
- MOZ_ASSERT(!ret || (!check_placement && ret)
- || (check_placement && ret == aAddr));
+ // We succeeded in mapping memory, but not in the right place.
+ pages_unmap(ret, aSize);
+ ret = nullptr;
+ }
+ if (ret) {
+ MozTagAnonymousMemory(ret, aSize, "jemalloc");
+ }
+
+#if defined(__ia64__) || \
+ (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
+ MOZ_ASSERT(!ret || (!check_placement && ret) ||
+ (check_placement && ret == aAddr));
#else
- MOZ_ASSERT(!ret || (!aAddr && ret != aAddr)
- || (aAddr && ret == aAddr));
+ MOZ_ASSERT(!ret || (!aAddr && ret != aAddr) || (aAddr && ret == aAddr));
#endif
- return ret;
+ return ret;
}
#endif
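The ia64/sparc64 path above rests on two constants: 0xffff800000000000 covers the 17 most significant bits of a 64-bit address, and the 0x0000070000000000 hint sits below that region while leaving a large span of addresses available above it. A tiny standalone check of that arithmetic (illustrative only):

#include <cassert>
#include <cstdint>

int
main()
{
  const uint64_t kHighBitsMask = 0xffff800000000000ULL; // Bits 47..63, 17 bits.
  const uint64_t kHint = 0x0000070000000000ULL;

  // The hint itself has the required bits clear, as does every address
  // below 2^47, which is where the masked region starts.
  assert((kHint & kHighBitsMask) == 0);
  assert(kHighBitsMask == ~((UINT64_C(1) << 47) - 1));

  // The span between the hint and 2^47 is 121 TiB, the "about 120TB of
  // virtual address space" mentioned in the comment.
  uint64_t usable = (UINT64_C(1) << 47) - kHint;
  assert(usable == (UINT64_C(121) << 40));
  return 0;
}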
#ifdef XP_DARWIN
-#define VM_COPY_MIN (pagesize << 5)
+#define VM_COPY_MIN (pagesize << 5)
static inline void
-pages_copy(void *dest, const void *src, size_t n)
+pages_copy(void* dest, const void* src, size_t n)
{
- MOZ_ASSERT((void *)((uintptr_t)dest & ~pagesize_mask) == dest);
- MOZ_ASSERT(n >= VM_COPY_MIN);
- MOZ_ASSERT((void *)((uintptr_t)src & ~pagesize_mask) == src);
-
- vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
- (vm_address_t)dest);
+ MOZ_ASSERT((void*)((uintptr_t)dest & ~pagesize_mask) == dest);
+ MOZ_ASSERT(n >= VM_COPY_MIN);
+ MOZ_ASSERT((void*)((uintptr_t)src & ~pagesize_mask) == src);
+
+ vm_copy(
+ mach_task_self(), (vm_address_t)src, (vm_size_t)n, (vm_address_t)dest);
}
#endif
-template <size_t Bits>
+template<size_t Bits>
bool
AddressRadixTree<Bits>::Init()
{
mLock.Init();
mRoot = (void**)base_calloc(1 << kBitsAtLevel1, sizeof(void*));
return mRoot;
}
-template <size_t Bits>
+template<size_t Bits>
void**
AddressRadixTree<Bits>::GetSlot(void* aKey, bool aCreate)
{
uintptr_t key = reinterpret_cast<uintptr_t>(aKey);
uintptr_t subkey;
unsigned i, lshift, height, bits;
void** node;
void** child;
- for (i = lshift = 0, height = kHeight, node = mRoot;
- i < height - 1;
+ for (i = lshift = 0, height = kHeight, node = mRoot; i < height - 1;
i++, lshift += bits, node = child) {
bits = i ? kBitsPerLevel : kBitsAtLevel1;
subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
- child = (void**) node[subkey];
+ child = (void**)node[subkey];
if (!child && aCreate) {
- child = (void**) base_calloc(1 << kBitsPerLevel, sizeof(void*));
+ child = (void**)base_calloc(1 << kBitsPerLevel, sizeof(void*));
if (child) {
node[subkey] = child;
}
}
if (!child) {
return nullptr;
}
}
// node is a leaf, so it contains values rather than node
// pointers.
bits = i ? kBitsPerLevel : kBitsAtLevel1;
subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
return &node[subkey];
}
-template <size_t Bits>
+template<size_t Bits>
void*
AddressRadixTree<Bits>::Get(void* aKey)
{
void* ret = nullptr;
void** slot = GetSlot(aKey);
if (slot) {
@@ -1665,174 +1715,173 @@ AddressRadixTree<Bits>::Get(void* aKey)
MOZ_ASSERT(ret == *slot);
} else {
MOZ_ASSERT(ret == nullptr);
}
#endif
return ret;
}
-template <size_t Bits>
+template<size_t Bits>
bool
AddressRadixTree<Bits>::Set(void* aKey, void* aValue)
{
MutexAutoLock lock(mLock);
void** slot = GetSlot(aKey, /* create = */ true);
if (slot) {
*slot = aValue;
}
return slot;
}
// pages_trim, chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked
// from upstream jemalloc 3.4.1 to fix Mozilla bug 956501.
// Return the offset between a and the nearest aligned address at or below a.
-#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
- ((size_t)((uintptr_t)(a) & (alignment - 1)))
+#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
+ ((size_t)((uintptr_t)(a) & (alignment - 1)))
// Return the smallest alignment multiple that is >= s.
-#define ALIGNMENT_CEILING(s, alignment) \
- (((s) + (alignment - 1)) & (~(alignment - 1)))
-
-static void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
+#define ALIGNMENT_CEILING(s, alignment) \
+ (((s) + (alignment - 1)) & (~(alignment - 1)))
+
+static void*
+pages_trim(void* addr, size_t alloc_size, size_t leadsize, size_t size)
{
- void *ret = (void *)((uintptr_t)addr + leadsize);
-
- MOZ_ASSERT(alloc_size >= leadsize + size);
+ void* ret = (void*)((uintptr_t)addr + leadsize);
+
+ MOZ_ASSERT(alloc_size >= leadsize + size);
#ifdef XP_WIN
- {
- void *new_addr;
-
- pages_unmap(addr, alloc_size);
- new_addr = pages_map(ret, size);
- if (new_addr == ret) {
- return ret;
- }
- if (new_addr) {
- pages_unmap(new_addr, size);
- }
- return nullptr;
- }
+ {
+ void* new_addr;
+
+ pages_unmap(addr, alloc_size);
+ new_addr = pages_map(ret, size);
+ if (new_addr == ret) {
+ return ret;
+ }
+ if (new_addr) {
+ pages_unmap(new_addr, size);
+ }
+ return nullptr;
+ }
#else
- {
- size_t trailsize = alloc_size - leadsize - size;
-
- if (leadsize != 0) {
- pages_unmap(addr, leadsize);
- }
- if (trailsize != 0) {
- pages_unmap((void *)((uintptr_t)ret + size), trailsize);
- }
- return ret;
- }
+ {
+ size_t trailsize = alloc_size - leadsize - size;
+
+ if (leadsize != 0) {
+ pages_unmap(addr, leadsize);
+ }
+ if (trailsize != 0) {
+ pages_unmap((void*)((uintptr_t)ret + size), trailsize);
+ }
+ return ret;
+ }
#endif
}
-static void *
+static void*
chunk_alloc_mmap_slow(size_t size, size_t alignment)
{
- void *ret, *pages;
- size_t alloc_size, leadsize;
-
- alloc_size = size + alignment - pagesize;
- // Beware size_t wrap-around.
- if (alloc_size < size) {
- return nullptr;
- }
- do {
- pages = pages_map(nullptr, alloc_size);
- if (!pages) {
- return nullptr;
- }
- leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
- (uintptr_t)pages;
- ret = pages_trim(pages, alloc_size, leadsize, size);
- } while (!ret);
-
- MOZ_ASSERT(ret);
- return ret;
+ void *ret, *pages;
+ size_t alloc_size, leadsize;
+
+ alloc_size = size + alignment - pagesize;
+ // Beware size_t wrap-around.
+ if (alloc_size < size) {
+ return nullptr;
+ }
+ do {
+ pages = pages_map(nullptr, alloc_size);
+ if (!pages) {
+ return nullptr;
+ }
+ leadsize =
+ ALIGNMENT_CEILING((uintptr_t)pages, alignment) - (uintptr_t)pages;
+ ret = pages_trim(pages, alloc_size, leadsize, size);
+ } while (!ret);
+
+ MOZ_ASSERT(ret);
+ return ret;
}
-static void *
+static void*
chunk_alloc_mmap(size_t size, size_t alignment)
{
- void *ret;
- size_t offset;
-
- // Ideally, there would be a way to specify alignment to mmap() (like
- // NetBSD has), but in the absence of such a feature, we have to work
- // hard to efficiently create aligned mappings. The reliable, but
- // slow method is to create a mapping that is over-sized, then trim the
- // excess. However, that always results in one or two calls to
- // pages_unmap().
- //
- // Optimistically try mapping precisely the right amount before falling
- // back to the slow method, with the expectation that the optimistic
- // approach works most of the time.
- ret = pages_map(nullptr, size);
- if (!ret) {
- return nullptr;
- }
- offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
- if (offset != 0) {
- pages_unmap(ret, size);
- return chunk_alloc_mmap_slow(size, alignment);
- }
-
- MOZ_ASSERT(ret);
- return ret;
+ void* ret;
+ size_t offset;
+
+ // Ideally, there would be a way to specify alignment to mmap() (like
+ // NetBSD has), but in the absence of such a feature, we have to work
+ // hard to efficiently create aligned mappings. The reliable, but
+ // slow method is to create a mapping that is over-sized, then trim the
+ // excess. However, that always results in one or two calls to
+ // pages_unmap().
+ //
+ // Optimistically try mapping precisely the right amount before falling
+ // back to the slow method, with the expectation that the optimistic
+ // approach works most of the time.
+ ret = pages_map(nullptr, size);
+ if (!ret) {
+ return nullptr;
+ }
+ offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
+ if (offset != 0) {
+ pages_unmap(ret, size);
+ return chunk_alloc_mmap_slow(size, alignment);
+ }
+
+ MOZ_ASSERT(ret);
+ return ret;
}
// Purge and release the pages in the chunk of length `length` at `addr` to
// the OS.
// Returns whether the pages are guaranteed to be full of zeroes when the
// function returns.
// The force_zero argument explicitly requests that the memory is guaranteed
// to be full of zeroes when the function returns.
static bool
-pages_purge(void *addr, size_t length, bool force_zero)
+pages_purge(void* addr, size_t length, bool force_zero)
{
#ifdef MALLOC_DECOMMIT
- pages_decommit(addr, length);
- return true;
+ pages_decommit(addr, length);
+ return true;
#else
-# ifndef XP_LINUX
- if (force_zero) {
- memset(addr, 0, length);
- }
-# endif
-# ifdef XP_WIN
- // The region starting at addr may have been allocated in multiple calls
- // to VirtualAlloc and recycled, so resetting the entire region in one
- // go may not be valid. However, since we allocate at least a chunk at a
- // time, we may touch any region in chunksized increments.
- size_t pages_size = std::min(length, chunksize -
- GetChunkOffsetForPtr(addr));
- while (length > 0) {
- VirtualAlloc(addr, pages_size, MEM_RESET, PAGE_READWRITE);
- addr = (void *)((uintptr_t)addr + pages_size);
- length -= pages_size;
- pages_size = std::min(length, chunksize);
- }
- return force_zero;
-# else
-# ifdef XP_LINUX
-# define JEMALLOC_MADV_PURGE MADV_DONTNEED
-# define JEMALLOC_MADV_ZEROS true
-# else // FreeBSD and Darwin.
-# define JEMALLOC_MADV_PURGE MADV_FREE
-# define JEMALLOC_MADV_ZEROS force_zero
-# endif
- int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
- return JEMALLOC_MADV_ZEROS && err == 0;
-# undef JEMALLOC_MADV_PURGE
-# undef JEMALLOC_MADV_ZEROS
-# endif
+#ifndef XP_LINUX
+ if (force_zero) {
+ memset(addr, 0, length);
+ }
+#endif
+#ifdef XP_WIN
+ // The region starting at addr may have been allocated in multiple calls
+ // to VirtualAlloc and recycled, so resetting the entire region in one
+ // go may not be valid. However, since we allocate at least a chunk at a
+ // time, we may touch any region in chunksized increments.
+ size_t pages_size = std::min(length, chunksize - GetChunkOffsetForPtr(addr));
+ while (length > 0) {
+ VirtualAlloc(addr, pages_size, MEM_RESET, PAGE_READWRITE);
+ addr = (void*)((uintptr_t)addr + pages_size);
+ length -= pages_size;
+ pages_size = std::min(length, chunksize);
+ }
+ return force_zero;
+#else
+#ifdef XP_LINUX
+#define JEMALLOC_MADV_PURGE MADV_DONTNEED
+#define JEMALLOC_MADV_ZEROS true
+#else // FreeBSD and Darwin.
+#define JEMALLOC_MADV_PURGE MADV_FREE
+#define JEMALLOC_MADV_ZEROS force_zero
+#endif
+ int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
+ return JEMALLOC_MADV_ZEROS && err == 0;
+#undef JEMALLOC_MADV_PURGE
+#undef JEMALLOC_MADV_ZEROS
+#endif
#endif
}
static void*
chunk_recycle(size_t aSize, size_t aAlignment, bool* aZeroed)
{
extent_node_t key;
@@ -1844,18 +1893,18 @@ chunk_recycle(size_t aSize, size_t aAlig
key.addr = nullptr;
key.size = alloc_size;
chunks_mtx.Lock();
extent_node_t* node = gChunksBySize.SearchOrNext(&key);
if (!node) {
chunks_mtx.Unlock();
return nullptr;
}
- size_t leadsize =
- ALIGNMENT_CEILING((uintptr_t)node->addr, aAlignment) - (uintptr_t)node->addr;
+ size_t leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, aAlignment) -
+ (uintptr_t)node->addr;
MOZ_ASSERT(node->size >= leadsize + aSize);
size_t trailsize = node->size - leadsize - aSize;
void* ret = (void*)((uintptr_t)node->addr + leadsize);
ChunkType chunk_type = node->chunk_type;
if (aZeroed) {
*aZeroed = (chunk_type == ZEROED_CHUNK);
}
// Remove node from the tree.
@@ -2085,45 +2134,46 @@ chunk_dealloc(void* aChunk, size_t aSize
}
#undef CAN_RECYCLE
// End chunk management functions.
// ***************************************************************************
// Begin arena.
-static inline arena_t *
+static inline arena_t*
thread_local_arena(bool enabled)
{
- arena_t *arena;
+ arena_t* arena;
if (enabled) {
// The arena will essentially be leaked if this function is
    // called with `false`, but it doesn't matter at the moment,
    // because in practice nothing actually calls this function
// with `false`, except maybe at shutdown.
arena = arenas_extend();
} else {
arena = gMainArena;
}
thread_arena.set(arena);
return arena;
}
-template<> inline void
+template<>
+inline void
MozJemalloc::jemalloc_thread_local_arena(bool aEnabled)
{
thread_local_arena(aEnabled);
}
// Choose an arena based on a per-thread value.
-static inline arena_t *
+static inline arena_t*
choose_arena(size_t size)
{
- arena_t *ret = nullptr;
+ arena_t* ret = nullptr;
// We can only use TLS if this is a PIC library, since for the static
// library version, libc's malloc is used by TLS allocation, which
// introduces a bootstrapping issue.
// Only use a thread local arena for small sizes.
if (size <= small_max) {
ret = thread_arena.get();
@@ -2131,163 +2181,167 @@ choose_arena(size_t size)
if (!ret) {
ret = thread_local_arena(false);
}
MOZ_DIAGNOSTIC_ASSERT(ret);
return ret;
}
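// A minimal standalone sketch, not taken from this file: choose_arena() uses
// the per-thread cached arena for small requests and creates one lazily on
// first use. Arena and CreateArena() below are hypothetical stand-ins for
// arena_t and arenas_extend(); the real code goes through its own
// thread_arena wrapper because, as the comment above notes, plain TLS is only
// safe in the PIC build.
#include <atomic>

struct Arena
{
  int id;
};

static Arena*
CreateArena()
{
  static std::atomic<int> next_id{ 0 };
  return new Arena{ next_id++ };
}

static Arena*
ChooseArenaSketch()
{
  static thread_local Arena* cached = nullptr;
  if (!cached) {
    cached = CreateArena(); // First small allocation on this thread.
  }
  return cached;
}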
-static inline void *
-arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
+static inline void*
+arena_run_reg_alloc(arena_run_t* run, arena_bin_t* bin)
{
- void *ret;
- unsigned i, mask, bit, regind;
-
- MOZ_ASSERT(run->magic == ARENA_RUN_MAGIC);
- MOZ_ASSERT(run->regs_minelm < bin->regs_mask_nelms);
-
- // Move the first check outside the loop, so that run->regs_minelm can
- // be updated unconditionally, without the possibility of updating it
- // multiple times.
- i = run->regs_minelm;
- mask = run->regs_mask[i];
- if (mask != 0) {
- // Usable allocation found.
- bit = ffs((int)mask) - 1;
-
- regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
- MOZ_ASSERT(regind < bin->nregs);
- ret = (void *)(((uintptr_t)run) + bin->reg0_offset
- + (bin->reg_size * regind));
-
- // Clear bit.
- mask ^= (1U << bit);
- run->regs_mask[i] = mask;
-
- return ret;
- }
-
- for (i++; i < bin->regs_mask_nelms; i++) {
- mask = run->regs_mask[i];
- if (mask != 0) {
- // Usable allocation found.
- bit = ffs((int)mask) - 1;
-
- regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
- MOZ_ASSERT(regind < bin->nregs);
- ret = (void *)(((uintptr_t)run) + bin->reg0_offset
- + (bin->reg_size * regind));
-
- // Clear bit.
- mask ^= (1U << bit);
- run->regs_mask[i] = mask;
-
- // Make a note that nothing before this element
- // contains a free region.
- run->regs_minelm = i; // Low payoff: + (mask == 0);
-
- return ret;
- }
- }
- // Not reached.
- MOZ_DIAGNOSTIC_ASSERT(0);
- return nullptr;
+ void* ret;
+ unsigned i, mask, bit, regind;
+
+ MOZ_ASSERT(run->magic == ARENA_RUN_MAGIC);
+ MOZ_ASSERT(run->regs_minelm < bin->regs_mask_nelms);
+
+ // Move the first check outside the loop, so that run->regs_minelm can
+ // be updated unconditionally, without the possibility of updating it
+ // multiple times.
+ i = run->regs_minelm;
+ mask = run->regs_mask[i];
+ if (mask != 0) {
+ // Usable allocation found.
+ bit = ffs((int)mask) - 1;
+
+ regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
+ MOZ_ASSERT(regind < bin->nregs);
+ ret =
+ (void*)(((uintptr_t)run) + bin->reg0_offset + (bin->reg_size * regind));
+
+ // Clear bit.
+ mask ^= (1U << bit);
+ run->regs_mask[i] = mask;
+
+ return ret;
+ }
+
+ for (i++; i < bin->regs_mask_nelms; i++) {
+ mask = run->regs_mask[i];
+ if (mask != 0) {
+ // Usable allocation found.
+ bit = ffs((int)mask) - 1;
+
+ regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
+ MOZ_ASSERT(regind < bin->nregs);
+ ret =
+ (void*)(((uintptr_t)run) + bin->reg0_offset + (bin->reg_size * regind));
+
+ // Clear bit.
+ mask ^= (1U << bit);
+ run->regs_mask[i] = mask;
+
+ // Make a note that nothing before this element
+ // contains a free region.
+ run->regs_minelm = i; // Low payoff: + (mask == 0);
+
+ return ret;
+ }
+ }
+ // Not reached.
+ MOZ_DIAGNOSTIC_ASSERT(0);
+ return nullptr;
}
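// A minimal standalone sketch, not taken from this file: arena_run_reg_alloc()
// treats regs_mask as a bitmap where a set bit marks a free region, and uses
// ffs() to find the lowest free one. The helper below assumes 32-bit unsigned
// words (which is what SIZEOF_INT_2POW + 3 == 5 encodes) and POSIX ffs().
#include <strings.h> // ffs()

static int
TakeFirstFreeRegion(unsigned* mask_words, unsigned nwords)
{
  for (unsigned i = 0; i < nwords; i++) {
    unsigned mask = mask_words[i];
    if (mask != 0) {
      int bit = ffs((int)mask) - 1;       // Lowest set bit == lowest free region.
      mask_words[i] = mask ^ (1U << bit); // Clear it: the region is now allocated.
      return (int)(i * 32 + bit);         // Region index; its address is
                                          // run + reg0_offset + index * reg_size.
    }
  }
  return -1; // No free region left in this run.
}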
static inline void
-arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
+arena_run_reg_dalloc(arena_run_t* run, arena_bin_t* bin, void* ptr, size_t size)
{
- // To divide by a number D that is not a power of two we multiply
- // by (2^21 / D) and then right shift by 21 positions.
- //
- // X / D
- //
- // becomes
- //
- // (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
-
-#define SIZE_INV_SHIFT 21
-#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
- static const unsigned size_invs[] = {
- SIZE_INV(3),
- SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
- SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
- SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
- SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
- SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
- SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
- SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
+// To divide by a number D that is not a power of two we multiply
+// by (2^21 / D) and then right shift by 21 positions.
+//
+// X / D
+//
+// becomes
+//
+// (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
+
+#define SIZE_INV_SHIFT 21
+#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
+ // clang-format off
+ static const unsigned size_invs[] = {
+ SIZE_INV(3),
+ SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
+ SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
+ SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
+ SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
+ SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
+ SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
+ SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
#if (QUANTUM_2POW_MIN < 4)
- ,
- SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
- SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
- SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
- SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
- SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
- SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
- SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
- SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
+ ,
+ SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
+ SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
+ SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
+ SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
+ SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
+ SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
+ SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
+ SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
#endif
- };
- unsigned diff, regind, elm, bit;
-
- MOZ_ASSERT(run->magic == ARENA_RUN_MAGIC);
- MOZ_ASSERT(((sizeof(size_invs)) / sizeof(unsigned)) + 3
- >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
-
- // Avoid doing division with a variable divisor if possible. Using
- // actual division here can reduce allocator throughput by over 20%!
- diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
- if ((size & (size - 1)) == 0) {
- // log2_table allows fast division of a power of two in the
- // [1..128] range.
- //
- // (x / divisor) becomes (x >> log2_table[divisor - 1]).
- static const unsigned char log2_table[] = {
- 0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
- };
-
- if (size <= 128) {
- regind = (diff >> log2_table[size - 1]);
- } else if (size <= 32768) {
- regind = diff >> (8 + log2_table[(size >> 8) - 1]);
- } else {
- // The run size is too large for us to use the lookup
- // table. Use real division.
- regind = diff / size;
- }
- } else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
- << QUANTUM_2POW_MIN) + 2) {
- regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
- regind >>= SIZE_INV_SHIFT;
- } else {
- // size_invs isn't large enough to handle this size class, so
- // calculate regind using actual division. This only happens
- // if the user increases small_max via the 'S' runtime
- // configuration option.
- regind = diff / size;
- };
- MOZ_DIAGNOSTIC_ASSERT(diff == regind * size);
- MOZ_DIAGNOSTIC_ASSERT(regind < bin->nregs);
-
- elm = regind >> (SIZEOF_INT_2POW + 3);
- if (elm < run->regs_minelm) {
- run->regs_minelm = elm;
- }
- bit = regind - (elm << (SIZEOF_INT_2POW + 3));
- MOZ_DIAGNOSTIC_ASSERT((run->regs_mask[elm] & (1U << bit)) == 0);
- run->regs_mask[elm] |= (1U << bit);
+ };
+ // clang-format on
+ unsigned diff, regind, elm, bit;
+
+ MOZ_ASSERT(run->magic == ARENA_RUN_MAGIC);
+ MOZ_ASSERT(((sizeof(size_invs)) / sizeof(unsigned)) + 3 >=
+ (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
+
+ // Avoid doing division with a variable divisor if possible. Using
+ // actual division here can reduce allocator throughput by over 20%!
+ diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
+ if ((size & (size - 1)) == 0) {
+ // log2_table allows fast division of a power of two in the
+ // [1..128] range.
+ //
+ // (x / divisor) becomes (x >> log2_table[divisor - 1]).
+ // clang-format off
+ static const unsigned char log2_table[] = {
+ 0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
+ };
+ // clang-format on
+
+ if (size <= 128) {
+ regind = (diff >> log2_table[size - 1]);
+ } else if (size <= 32768) {
+ regind = diff >> (8 + log2_table[(size >> 8) - 1]);
+ } else {
+ // The run size is too large for us to use the lookup
+ // table. Use real division.
+ regind = diff / size;
+ }
+ } else if (size <=
+ ((sizeof(size_invs) / sizeof(unsigned)) << QUANTUM_2POW_MIN) + 2) {
+ regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
+ regind >>= SIZE_INV_SHIFT;
+ } else {
+ // size_invs isn't large enough to handle this size class, so
+ // calculate regind using actual division. This only happens
+ // if the user increases small_max via the 'S' runtime
+ // configuration option.
+ regind = diff / size;
+ };
+ MOZ_DIAGNOSTIC_ASSERT(diff == regind * size);
+ MOZ_DIAGNOSTIC_ASSERT(regind < bin->nregs);
+
+ elm = regind >> (SIZEOF_INT_2POW + 3);
+ if (elm < run->regs_minelm) {
+ run->regs_minelm = elm;
+ }
+ bit = regind - (elm << (SIZEOF_INT_2POW + 3));
+ MOZ_DIAGNOSTIC_ASSERT((run->regs_mask[elm] & (1U << bit)) == 0);
+ run->regs_mask[elm] |= (1U << bit);
#undef SIZE_INV
#undef SIZE_INV_SHIFT
}
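// A minimal standalone sketch, not taken from this file: it checks the two
// division shortcuts used by arena_run_reg_dalloc(). The reciprocal trick
// computes diff / d as (diff * ((2^21 / d) + 1)) >> 21, which is exact as long
// as diff is a multiple of d and the run is small; the log2_table trick turns
// division by a power of two into a shift. A 16-byte quantum and runs of at
// most 64 KiB are assumed here.
#include <cassert>
#include <cstdint>

static void
CheckDivisionShortcuts()
{
  const unsigned kShift = 21;                      // SIZE_INV_SHIFT.
  for (uint32_t d = 48; d <= 512; d += 16) {       // Quantum-spaced region sizes.
    const uint32_t inv = ((1U << kShift) / d) + 1; // SIZE_INV(d / 16).
    for (uint32_t q = 0; q * d < 65536; q++) {
      uint32_t diff = q * d;                       // Offsets are exact multiples of d.
      assert(((diff * inv) >> kShift) == q);       // Multiply+shift == true division.
    }
  }
  // Power-of-two divisors up to 128: x / (1 << k) is just x >> k.
  for (uint32_t k = 0; k <= 7; k++) {
    for (uint32_t x = 0; x < 4096; x++) {
      assert(x / (1U << k) == (x >> k));
    }
  }
}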
bool
arena_t::SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero)
{
arena_chunk_t* chunk;
@@ -2308,64 +2362,65 @@ arena_t::SplitRun(arena_run_t* aRun, siz
// pages in one operation, in order to reduce system call
// overhead.
if (chunk->map[run_ind + i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) {
size_t j;
// Advance i+j to just past the index of the last page
// to commit. Clear CHUNK_MAP_DECOMMITTED and
// CHUNK_MAP_MADVISED along the way.
- for (j = 0; i + j < need_pages && (chunk->map[run_ind +
- i + j].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED); j++) {
+ for (j = 0; i + j < need_pages && (chunk->map[run_ind + i + j].bits &
+ CHUNK_MAP_MADVISED_OR_DECOMMITTED);
+ j++) {
// DECOMMITTED and MADVISED are mutually exclusive.
MOZ_ASSERT(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
- chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
-
- chunk->map[run_ind + i + j].bits &=
- ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
+ chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
+
+ chunk->map[run_ind + i + j].bits &= ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
}
-# ifdef MALLOC_DECOMMIT
+#ifdef MALLOC_DECOMMIT
bool committed = pages_commit(
(void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
j << pagesize_2pow);
// pages_commit zeroes pages, so mark them as such if it succeeded.
// That's checked further below to avoid manually zeroing the pages.
for (size_t k = 0; k < j; k++) {
chunk->map[run_ind + i + k].bits |=
committed ? CHUNK_MAP_ZEROED : CHUNK_MAP_DECOMMITTED;
}
if (!committed) {
return false;
}
-# endif
+#endif
mStats.committed += j;
}
}
mRunsAvail.Remove(&chunk->map[run_ind]);
// Keep track of trailing unused pages for later use.
if (rem_pages > 0) {
- chunk->map[run_ind+need_pages].bits = (rem_pages <<
- pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
- pagesize_mask);
- chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
- pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
- pagesize_mask);
- mRunsAvail.Insert(&chunk->map[run_ind+need_pages]);
+ chunk->map[run_ind + need_pages].bits =
+ (rem_pages << pagesize_2pow) |
+ (chunk->map[run_ind + need_pages].bits & pagesize_mask);
+ chunk->map[run_ind + total_pages - 1].bits =
+ (rem_pages << pagesize_2pow) |
+ (chunk->map[run_ind + total_pages - 1].bits & pagesize_mask);
+ mRunsAvail.Insert(&chunk->map[run_ind + need_pages]);
}
for (i = 0; i < need_pages; i++) {
// Zero if necessary.
if (aZero) {
if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED) == 0) {
memset((void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
- 0, pagesize);
+ 0,
+ pagesize);
// CHUNK_MAP_ZEROED is cleared below.
}
}
// Update dirty page accounting.
if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
chunk->ndirty--;
mNumDirty--;
@@ -2403,53 +2458,55 @@ arena_t::InitChunk(arena_chunk_t* aChunk
  // When the chunk we're initializing as an arena chunk is zeroed, we
  // mark all runs as decommitted and zeroed.
// When it is not, which we can assume means it's a recycled arena chunk,
// all it can contain is an arena chunk header (which we're overwriting),
// and zeroed or poisoned memory (because a recycled arena chunk will
// have been emptied before being recycled). In that case, we can get
// away with reusing the chunk as-is, marking all runs as madvised.
- size_t flags = aZeroed ? CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED
- : CHUNK_MAP_MADVISED;
+ size_t flags =
+ aZeroed ? CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED : CHUNK_MAP_MADVISED;
mStats.mapped += chunksize;
aChunk->arena = this;
// Claim that no pages are in use, since the header is merely overhead.
aChunk->ndirty = 0;
// Initialize the map to contain one maximal free untouched run.
#ifdef MALLOC_DECOMMIT
- arena_run_t* run = (arena_run_t*)(uintptr_t(aChunk) +
- (arena_chunk_header_npages << pagesize_2pow));
+ arena_run_t* run =
+ (arena_run_t*)(uintptr_t(aChunk) +
+ (arena_chunk_header_npages << pagesize_2pow));
#endif
for (i = 0; i < arena_chunk_header_npages; i++) {
aChunk->map[i].bits = 0;
}
aChunk->map[i].bits = arena_maxclass | flags;
- for (i++; i < chunk_npages-1; i++) {
+ for (i++; i < chunk_npages - 1; i++) {
aChunk->map[i].bits = flags;
}
- aChunk->map[chunk_npages-1].bits = arena_maxclass | flags;
+ aChunk->map[chunk_npages - 1].bits = arena_maxclass | flags;
#ifdef MALLOC_DECOMMIT
// Start out decommitted, in order to force a closer correspondence
// between dirty pages and committed untouched pages.
pages_decommit(run, arena_maxclass);
#endif
mStats.committed += arena_chunk_header_npages;
// Insert the run into the tree of available runs.
mRunsAvail.Insert(&aChunk->map[arena_chunk_header_npages]);
#ifdef MALLOC_DOUBLE_PURGE
- new (&aChunk->chunks_madvised_elem) mozilla::DoublyLinkedListElement<arena_chunk_t>();
+ new (&aChunk->chunks_madvised_elem)
+ mozilla::DoublyLinkedListElement<arena_chunk_t>();
#endif
}
void
arena_t::DeallocChunk(arena_chunk_t* aChunk)
{
if (mSpare) {
if (mSpare->ndirty > 0) {
@@ -2487,39 +2544,41 @@ arena_t::AllocRun(arena_bin_t* aBin, siz
MOZ_ASSERT(aSize <= arena_maxclass);
MOZ_ASSERT((aSize & pagesize_mask) == 0);
// Search the arena's chunks for the lowest best fit.
key.bits = aSize | CHUNK_MAP_KEY;
mapelm = mRunsAvail.SearchOrNext(&key);
if (mapelm) {
arena_chunk_t* chunk = GetChunkForPtr(mapelm);
- size_t pageind = (uintptr_t(mapelm) - uintptr_t(chunk->map)) /
- sizeof(arena_chunk_map_t);
+ size_t pageind =
+ (uintptr_t(mapelm) - uintptr_t(chunk->map)) / sizeof(arena_chunk_map_t);
run = (arena_run_t*)(uintptr_t(chunk) + (pageind << pagesize_2pow));
} else if (mSpare) {
// Use the spare.
arena_chunk_t* chunk = mSpare;
mSpare = nullptr;
- run = (arena_run_t*)(uintptr_t(chunk) + (arena_chunk_header_npages << pagesize_2pow));
+ run = (arena_run_t*)(uintptr_t(chunk) +
+ (arena_chunk_header_npages << pagesize_2pow));
// Insert the run into the tree of available runs.
mRunsAvail.Insert(&chunk->map[arena_chunk_header_npages]);
} else {
// No usable runs. Create a new chunk from which to allocate
// the run.
bool zeroed;
- arena_chunk_t* chunk = (arena_chunk_t*)
- chunk_alloc(chunksize, chunksize, false, &zeroed);
+ arena_chunk_t* chunk =
+ (arena_chunk_t*)chunk_alloc(chunksize, chunksize, false, &zeroed);
if (!chunk) {
return nullptr;
}
InitChunk(chunk, zeroed);
- run = (arena_run_t*)(uintptr_t(chunk) + (arena_chunk_header_npages << pagesize_2pow));
+ run = (arena_run_t*)(uintptr_t(chunk) +
+ (arena_chunk_header_npages << pagesize_2pow));
}
// Update page map.
return SplitRun(run, aSize, aLarge, aZero) ? run : nullptr;
}
void
arena_t::Purge(bool aAll)
{
@@ -2551,44 +2610,44 @@ arena_t::Purge(bool aAll)
MOZ_DIAGNOSTIC_ASSERT(i >= arena_chunk_header_npages);
if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
#ifdef MALLOC_DECOMMIT
const size_t free_operation = CHUNK_MAP_DECOMMITTED;
#else
const size_t free_operation = CHUNK_MAP_MADVISED;
#endif
- MOZ_ASSERT((chunk->map[i].bits &
- CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
+ MOZ_ASSERT((chunk->map[i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) ==
+ 0);
chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
// Find adjacent dirty run(s).
- for (npages = 1;
- i > arena_chunk_header_npages &&
- (chunk->map[i - 1].bits & CHUNK_MAP_DIRTY);
+ for (npages = 1; i > arena_chunk_header_npages &&
+ (chunk->map[i - 1].bits & CHUNK_MAP_DIRTY);
npages++) {
i--;
- MOZ_ASSERT((chunk->map[i].bits &
- CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
+ MOZ_ASSERT((chunk->map[i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) ==
+ 0);
chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
}
chunk->ndirty -= npages;
mNumDirty -= npages;
#ifdef MALLOC_DECOMMIT
pages_decommit((void*)(uintptr_t(chunk) + (i << pagesize_2pow)),
(npages << pagesize_2pow));
#endif
mStats.committed -= npages;
#ifndef MALLOC_DECOMMIT
madvise((void*)(uintptr_t(chunk) + (i << pagesize_2pow)),
- (npages << pagesize_2pow), MADV_FREE);
-# ifdef MALLOC_DOUBLE_PURGE
+ (npages << pagesize_2pow),
+ MADV_FREE);
+#ifdef MALLOC_DOUBLE_PURGE
madvised = true;
-# endif
+#endif
#endif
if (mNumDirty <= (dirty_max >> 1)) {
break;
}
}
}
if (chunk->ndirty == 0) {
@@ -2624,131 +2683,132 @@ arena_t::DallocRun(arena_run_t* aRun, bo
}
run_pages = (size >> pagesize_2pow);
// Mark pages as unallocated in the chunk map.
if (aDirty) {
size_t i;
for (i = 0; i < run_pages; i++) {
- MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
- == 0);
+ MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) ==
+ 0);
chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
}
if (chunk->ndirty == 0) {
mChunksDirty.Insert(chunk);
}
chunk->ndirty += run_pages;
mNumDirty += run_pages;
} else {
size_t i;
for (i = 0; i < run_pages; i++) {
- chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED);
+ chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
}
}
- chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
- pagesize_mask);
- chunk->map[run_ind+run_pages-1].bits = size |
- (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+ chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits & pagesize_mask);
+ chunk->map[run_ind + run_pages - 1].bits =
+ size | (chunk->map[run_ind + run_pages - 1].bits & pagesize_mask);
// Try to coalesce forward.
if (run_ind + run_pages < chunk_npages &&
- (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
- size_t nrun_size = chunk->map[run_ind+run_pages].bits &
- ~pagesize_mask;
+ (chunk->map[run_ind + run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
+ size_t nrun_size = chunk->map[run_ind + run_pages].bits & ~pagesize_mask;
// Remove successor from tree of available runs; the coalesced run is
// inserted later.
- mRunsAvail.Remove(&chunk->map[run_ind+run_pages]);
+ mRunsAvail.Remove(&chunk->map[run_ind + run_pages]);
size += nrun_size;
run_pages = size >> pagesize_2pow;
- MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
- == nrun_size);
- chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
- pagesize_mask);
- chunk->map[run_ind+run_pages-1].bits = size |
- (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+ MOZ_DIAGNOSTIC_ASSERT(
+ (chunk->map[run_ind + run_pages - 1].bits & ~pagesize_mask) == nrun_size);
+ chunk->map[run_ind].bits =
+ size | (chunk->map[run_ind].bits & pagesize_mask);
+ chunk->map[run_ind + run_pages - 1].bits =
+ size | (chunk->map[run_ind + run_pages - 1].bits & pagesize_mask);
}
// Try to coalesce backward.
- if (run_ind > arena_chunk_header_npages && (chunk->map[run_ind-1].bits &
- CHUNK_MAP_ALLOCATED) == 0) {
- size_t prun_size = chunk->map[run_ind-1].bits & ~pagesize_mask;
+ if (run_ind > arena_chunk_header_npages &&
+ (chunk->map[run_ind - 1].bits & CHUNK_MAP_ALLOCATED) == 0) {
+ size_t prun_size = chunk->map[run_ind - 1].bits & ~pagesize_mask;
run_ind -= prun_size >> pagesize_2pow;
// Remove predecessor from tree of available runs; the coalesced run is
// inserted later.
mRunsAvail.Remove(&chunk->map[run_ind]);
size += prun_size;
run_pages = size >> pagesize_2pow;
MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
- prun_size);
- chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
- pagesize_mask);
- chunk->map[run_ind+run_pages-1].bits = size |
- (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+ prun_size);
+ chunk->map[run_ind].bits =
+ size | (chunk->map[run_ind].bits & pagesize_mask);
+ chunk->map[run_ind + run_pages - 1].bits =
+ size | (chunk->map[run_ind + run_pages - 1].bits & pagesize_mask);
}
// Insert into tree of available runs, now that coalescing is complete.
mRunsAvail.Insert(&chunk->map[run_ind]);
// Deallocate chunk if it is now completely unused.
- if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
- CHUNK_MAP_ALLOCATED)) == arena_maxclass) {
+ if ((chunk->map[arena_chunk_header_npages].bits &
+ (~pagesize_mask | CHUNK_MAP_ALLOCATED)) == arena_maxclass) {
DeallocChunk(chunk);
}
// Enforce mMaxDirty.
if (mNumDirty > mMaxDirty) {
Purge(false);
}
}
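// A minimal standalone sketch, not taken from this file: DallocRun() above can
// coalesce runs by rewriting map words because every chunk map entry packs a
// page-aligned run size into the bits above pagesize_mask and the CHUNK_MAP_*
// flags into the bits below it. A hypothetical 4 KiB page and flag value are
// assumed.
#include <cassert>
#include <cstddef>

static void
DemoMapBitsPacking()
{
  const size_t kPageSize = 4096;
  const size_t kPageMask = kPageSize - 1;
  const size_t kFlagDirty = 0x08;           // Hypothetical CHUNK_MAP_* bit.

  size_t run_size = 12 * kPageSize;         // Run sizes are always page-aligned.
  size_t bits = run_size | kFlagDirty;      // Pack size and flags into one word.

  assert((bits & ~kPageMask) == run_size);  // Size lives in the high bits.
  assert((bits & kPageMask) == kFlagDirty); // Flags live in the low bits.
}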
void
-arena_t::TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize,
+arena_t::TrimRunHead(arena_chunk_t* aChunk,
+ arena_run_t* aRun,
+ size_t aOldSize,
size_t aNewSize)
{
size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> pagesize_2pow;
size_t head_npages = (aOldSize - aNewSize) >> pagesize_2pow;
MOZ_ASSERT(aOldSize > aNewSize);
// Update the chunk map so that arena_t::RunDalloc() can treat the
// leading run as separately allocated.
- aChunk->map[pageind].bits = (aOldSize - aNewSize) | CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED;
- aChunk->map[pageind+head_npages].bits = aNewSize | CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED;
+ aChunk->map[pageind].bits =
+ (aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ aChunk->map[pageind + head_npages].bits =
+ aNewSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
DallocRun(aRun, false);
}
void
-arena_t::TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize,
- size_t aNewSize, bool aDirty)
+arena_t::TrimRunTail(arena_chunk_t* aChunk,
+ arena_run_t* aRun,
+ size_t aOldSize,
+ size_t aNewSize,
+ bool aDirty)
{
size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> pagesize_2pow;
size_t npages = aNewSize >> pagesize_2pow;
MOZ_ASSERT(aOldSize > aNewSize);
// Update the chunk map so that arena_t::RunDalloc() can treat the
// trailing run as separately allocated.
- aChunk->map[pageind].bits = aNewSize | CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED;
- aChunk->map[pageind+npages].bits = (aOldSize - aNewSize) | CHUNK_MAP_LARGE
- | CHUNK_MAP_ALLOCATED;
+ aChunk->map[pageind].bits = aNewSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ aChunk->map[pageind + npages].bits =
+ (aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
DallocRun((arena_run_t*)(uintptr_t(aRun) + aNewSize), aDirty);
}
arena_run_t*
arena_t::GetNonFullBinRun(arena_bin_t* aBin)
{
arena_chunk_map_t* mapelm;
@@ -2782,18 +2842,18 @@ arena_t::GetNonFullBinRun(arena_bin_t* a
for (i = 0; i < aBin->regs_mask_nelms - 1; i++) {
run->regs_mask[i] = UINT_MAX;
}
remainder = aBin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
if (remainder == 0) {
run->regs_mask[i] = UINT_MAX;
} else {
// The last element has spare bits that need to be unset.
- run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
- - remainder));
+ run->regs_mask[i] =
+ (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3)) - remainder));
}
run->regs_minelm = 0;
run->nfree = aBin->nregs;
#if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
run->magic = ARENA_RUN_MAGIC;
#endif
@@ -2837,80 +2897,80 @@ arena_t::MallocBinHard(arena_bin_t* aBin
// *) bin->run_size >= min_run_size
// *) bin->run_size <= arena_maxclass
// *) bin->run_size <= RUN_MAX_SMALL
// *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
//
// bin->nregs, bin->regs_mask_nelms, and bin->reg0_offset are
// also calculated here, since these settings are all interdependent.
static size_t
-arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
+arena_bin_run_size_calc(arena_bin_t* bin, size_t min_run_size)
{
- size_t try_run_size, good_run_size;
- unsigned good_nregs, good_mask_nelms, good_reg0_offset;
- unsigned try_nregs, try_mask_nelms, try_reg0_offset;
-
- MOZ_ASSERT(min_run_size >= pagesize);
- MOZ_ASSERT(min_run_size <= arena_maxclass);
-
- // Calculate known-valid settings before entering the run_size
- // expansion loop, so that the first part of the loop always copies
- // valid settings.
- //
- // The do..while loop iteratively reduces the number of regions until
- // the run header and the regions no longer overlap. A closed formula
- // would be quite messy, since there is an interdependency between the
- // header's mask length and the number of regions.
- try_run_size = min_run_size;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->reg_size)
- + 1; // Counter-act try_nregs-- in loop.
- do {
- try_nregs--;
- try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
- ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
- try_reg0_offset = try_run_size - (try_nregs * bin->reg_size);
- } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1))
- > try_reg0_offset);
-
- // run_size expansion loop.
- do {
- // Copy valid settings before trying more aggressive settings.
- good_run_size = try_run_size;
- good_nregs = try_nregs;
- good_mask_nelms = try_mask_nelms;
- good_reg0_offset = try_reg0_offset;
-
- // Try more aggressive settings.
- try_run_size += pagesize;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) /
- bin->reg_size) + 1; // Counter-act try_nregs-- in loop.
- do {
- try_nregs--;
- try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
- ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ?
- 1 : 0);
- try_reg0_offset = try_run_size - (try_nregs *
- bin->reg_size);
- } while (sizeof(arena_run_t) + (sizeof(unsigned) *
- (try_mask_nelms - 1)) > try_reg0_offset);
- } while (try_run_size <= arena_maxclass
- && RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX
- && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
-
- MOZ_ASSERT(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
- <= good_reg0_offset);
- MOZ_ASSERT((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
-
- // Copy final settings.
- bin->run_size = good_run_size;
- bin->nregs = good_nregs;
- bin->regs_mask_nelms = good_mask_nelms;
- bin->reg0_offset = good_reg0_offset;
-
- return good_run_size;
+ size_t try_run_size, good_run_size;
+ unsigned good_nregs, good_mask_nelms, good_reg0_offset;
+ unsigned try_nregs, try_mask_nelms, try_reg0_offset;
+
+ MOZ_ASSERT(min_run_size >= pagesize);
+ MOZ_ASSERT(min_run_size <= arena_maxclass);
+
+ // Calculate known-valid settings before entering the run_size
+ // expansion loop, so that the first part of the loop always copies
+ // valid settings.
+ //
+ // The do..while loop iteratively reduces the number of regions until
+ // the run header and the regions no longer overlap. A closed formula
+ // would be quite messy, since there is an interdependency between the
+ // header's mask length and the number of regions.
+ try_run_size = min_run_size;
+ try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->reg_size) +
+ 1; // Counter-act try_nregs-- in loop.
+ do {
+ try_nregs--;
+ try_mask_nelms =
+ (try_nregs >> (SIZEOF_INT_2POW + 3)) +
+ ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
+ try_reg0_offset = try_run_size - (try_nregs * bin->reg_size);
+ } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1)) >
+ try_reg0_offset);
+
+ // run_size expansion loop.
+ do {
+ // Copy valid settings before trying more aggressive settings.
+ good_run_size = try_run_size;
+ good_nregs = try_nregs;
+ good_mask_nelms = try_mask_nelms;
+ good_reg0_offset = try_reg0_offset;
+
+ // Try more aggressive settings.
+ try_run_size += pagesize;
+ try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->reg_size) +
+ 1; // Counter-act try_nregs-- in loop.
+ do {
+ try_nregs--;
+ try_mask_nelms =
+ (try_nregs >> (SIZEOF_INT_2POW + 3)) +
+ ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
+ try_reg0_offset = try_run_size - (try_nregs * bin->reg_size);
+ } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1)) >
+ try_reg0_offset);
+ } while (try_run_size <= arena_maxclass &&
+ RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX &&
+ (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
+
+ MOZ_ASSERT(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1)) <=
+ good_reg0_offset);
+ MOZ_ASSERT((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
+
+ // Copy final settings.
+ bin->run_size = good_run_size;
+ bin->nregs = good_nregs;
+ bin->regs_mask_nelms = good_mask_nelms;
+ bin->reg0_offset = good_reg0_offset;
+
+ return good_run_size;
}
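// A minimal standalone sketch, not taken from this file: the inner do..while
// of arena_bin_run_size_calc() shrinks the region count until the run header
// (a fixed part plus one 32-bit mask word per 32 regions) no longer overlaps
// region 0. The helper assumes sane inputs (the run holds the header plus at
// least one region) and 32-bit mask words.
#include <cstddef>

struct RunLayout
{
  unsigned nregs;      // Regions that fit after the header.
  unsigned mask_nelms; // Mask words needed for the free-region bitmap.
  size_t reg0_offset;  // Offset of the first region from the run start.
};

static RunLayout
ComputeRunLayout(size_t run_size, size_t reg_size, size_t header_size)
{
  unsigned nregs = (unsigned)((run_size - header_size) / reg_size) + 1;
  unsigned mask_nelms;
  size_t reg0_offset;
  do {
    nregs--;
    mask_nelms = (nregs >> 5) + ((nregs & 31) ? 1 : 0);
    reg0_offset = run_size - nregs * reg_size;
  } while (header_size + sizeof(unsigned) * (mask_nelms - 1) > reg0_offset);
  // e.g. run_size 4096, reg_size 64, header_size 48 -> 63 regions, 2 mask
  // words, reg0_offset 64.
  return RunLayout{ nregs, mask_nelms, reg0_offset };
}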
void*
arena_t::MallocSmall(size_t aSize, bool aZero)
{
void* ret;
arena_bin_t* bin;
arena_run_t* run;
@@ -2928,18 +2988,18 @@ arena_t::MallocSmall(size_t aSize, bool
}
} else if (aSize <= small_max) {
// Quantum-spaced.
aSize = QUANTUM_CEILING(aSize);
bin = &mBins[ntbins + (aSize >> QUANTUM_2POW_MIN) - 1];
} else {
// Sub-page.
aSize = pow2_ceil(aSize);
- bin = &mBins[ntbins + nqbins
- + (ffs((int)(aSize >> SMALL_MAX_2POW_DEFAULT)) - 2)];
+ bin = &mBins[ntbins + nqbins +
+ (ffs((int)(aSize >> SMALL_MAX_2POW_DEFAULT)) - 2)];
}
MOZ_DIAGNOSTIC_ASSERT(aSize == bin->reg_size);
{
MutexAutoLock lock(mLock);
if ((run = bin->runcur) && run->nfree > 0) {
ret = MallocBinEasy(bin, run);
} else {
@@ -3042,17 +3102,18 @@ arena_t::Palloc(size_t aAlignment, size_
MOZ_ASSERT(offset < aAllocSize);
if (offset == 0) {
TrimRunTail(chunk, (arena_run_t*)ret, aAllocSize, aSize, false);
} else {
size_t leadsize, trailsize;
leadsize = aAlignment - offset;
if (leadsize > 0) {
- TrimRunHead(chunk, (arena_run_t*)ret, aAllocSize, aAllocSize - leadsize);
+ TrimRunHead(
+ chunk, (arena_run_t*)ret, aAllocSize, aAllocSize - leadsize);
ret = (void*)(uintptr_t(ret) + leadsize);
}
trailsize = aAllocSize - leadsize - aSize;
if (trailsize != 0) {
// Trim trailing space.
MOZ_ASSERT(trailsize < aAllocSize);
TrimRunTail(chunk, (arena_run_t*)ret, aSize + trailsize, aSize, false);
@@ -3096,18 +3157,18 @@ ipalloc(size_t aAlignment, size_t aSize,
// (ceil_size < aSize) protects against the combination of maximal
// alignment and size greater than maximal alignment.
if (ceil_size < aSize) {
// size_t overflow.
return nullptr;
}
- if (ceil_size <= pagesize || (aAlignment <= pagesize
- && ceil_size <= arena_maxclass)) {
+ if (ceil_size <= pagesize ||
+ (aAlignment <= pagesize && ceil_size <= arena_maxclass)) {
aArena = aArena ? aArena : choose_arena(aSize);
ret = aArena->Malloc(ceil_size, false);
} else {
size_t run_size;
// We can't achieve sub-page alignment, so round up alignment
// permanently; it makes later calculations simpler.
aAlignment = PAGE_CEILING(aAlignment);
@@ -3154,39 +3215,39 @@ ipalloc(size_t aAlignment, size_t aSize,
}
MOZ_ASSERT((uintptr_t(ret) & (aAlignment - 1)) == 0);
return ret;
}
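// A minimal standalone sketch, not taken from this file: ALIGNMENT_CEILING,
// PAGE_CEILING and the assert at the end of ipalloc() all rely on the usual
// power-of-two round-up, (x + align - 1) & ~(align - 1), with alignment
// checked as (x & (align - 1)) == 0.
#include <cassert>
#include <cstdint>

static void
DemoAlignmentCeiling()
{
  const uintptr_t align = 4096; // Must be a power of two.
  for (uintptr_t x = 0; x < 3 * align; x += 7) {
    uintptr_t up = (x + align - 1) & ~(align - 1);
    assert((up & (align - 1)) == 0);   // Result is aligned...
    assert(up >= x && up - x < align); // ...and is the smallest such value.
  }
}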
// Return the size of the allocation pointed to by ptr.
static size_t
-arena_salloc(const void *ptr)
+arena_salloc(const void* ptr)
{
- size_t ret;
- arena_chunk_t *chunk;
- size_t pageind, mapbits;
-
- MOZ_ASSERT(ptr);
- MOZ_ASSERT(GetChunkOffsetForPtr(ptr) != 0);
-
- chunk = GetChunkForPtr(ptr);
- pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
- mapbits = chunk->map[pageind].bits;
- MOZ_DIAGNOSTIC_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapbits & CHUNK_MAP_LARGE) == 0) {
- arena_run_t *run = (arena_run_t *)(mapbits & ~pagesize_mask);
- MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
- ret = run->bin->reg_size;
- } else {
- ret = mapbits & ~pagesize_mask;
- MOZ_DIAGNOSTIC_ASSERT(ret != 0);
- }
-
- return ret;
+ size_t ret;
+ arena_chunk_t* chunk;
+ size_t pageind, mapbits;
+
+ MOZ_ASSERT(ptr);
+ MOZ_ASSERT(GetChunkOffsetForPtr(ptr) != 0);
+
+ chunk = GetChunkForPtr(ptr);
+ pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+ mapbits = chunk->map[pageind].bits;
+ MOZ_DIAGNOSTIC_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+ if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+ arena_run_t* run = (arena_run_t*)(mapbits & ~pagesize_mask);
+ MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
+ ret = run->bin->reg_size;
+ } else {
+ ret = mapbits & ~pagesize_mask;
+ MOZ_DIAGNOSTIC_ASSERT(ret != 0);
+ }
+
+ return ret;
}
// Validate ptr before assuming that it points to an allocation. Currently,
// the following validation is performed:
//
// + Check that ptr is not nullptr.
//
// + Check that ptr lies within a mapped chunk.
@@ -3245,17 +3306,18 @@ isalloc(const void* aPtr)
// Extract from tree of huge allocations.
key.addr = const_cast<void*>(aPtr);
extent_node_t* node = huge.Search(&key);
MOZ_DIAGNOSTIC_ASSERT(node);
return node->size;
}
-template<> inline void
+template<>
+inline void
MozJemalloc::jemalloc_ptr_info(const void* aPtr, jemalloc_ptr_info_t* aInfo)
{
arena_chunk_t* chunk = GetChunkForPtr(aPtr);
// Is the pointer null, or within one chunk's size of null?
if (!chunk) {
*aInfo = { TagUnknown, nullptr, 0 };
return;
@@ -3264,18 +3326,20 @@ MozJemalloc::jemalloc_ptr_info(const voi
// Look for huge allocations before looking for |chunk| in gChunkRTree.
// This is necessary because |chunk| won't be in gChunkRTree if it's
// the second or subsequent chunk in a huge allocation.
extent_node_t* node;
extent_node_t key;
{
MutexAutoLock lock(huge_mtx);
key.addr = const_cast<void*>(aPtr);
- node = reinterpret_cast<
- RedBlackTree<extent_node_t, ExtentTreeBoundsTrait>*>(&huge)->Search(&key);
+ node =
+ reinterpret_cast<RedBlackTree<extent_node_t, ExtentTreeBoundsTrait>*>(
+ &huge)
+ ->Search(&key);
if (node) {
*aInfo = { TagLiveHuge, node->addr, node->size };
return;
}
}
// It's not a huge allocation. Check if we have a known chunk.
if (!gChunkRTree.Get(chunk)) {
@@ -3343,17 +3407,17 @@ MozJemalloc::jemalloc_ptr_info(const voi
}
void* addr = ((char*)chunk) + (pageind << pagesize_2pow);
*aInfo = { TagLiveLarge, addr, size };
return;
}
// It must be a small allocation.
- auto run = (arena_run_t *)(mapbits & ~pagesize_mask);
+ auto run = (arena_run_t*)(mapbits & ~pagesize_mask);
MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
// The allocation size is stored in the run metadata.
size_t size = run->bin->reg_size;
// Address of the first possible pointer in the run after its headers.
uintptr_t reg0_addr = (uintptr_t)run + run->bin->reg0_offset;
if (aPtr < (void*)reg0_addr) {
@@ -3366,59 +3430,62 @@ MozJemalloc::jemalloc_ptr_info(const voi
unsigned regind = ((uintptr_t)aPtr - reg0_addr) / size;
// Pointer to the allocation's base address.
void* addr = (void*)(reg0_addr + regind * size);
// Check if the allocation has been freed.
unsigned elm = regind >> (SIZEOF_INT_2POW + 3);
unsigned bit = regind - (elm << (SIZEOF_INT_2POW + 3));
- PtrInfoTag tag = ((run->regs_mask[elm] & (1U << bit)))
- ? TagFreedSmall : TagLiveSmall;
-
- *aInfo = { tag, addr, size};
+ PtrInfoTag tag =
+ ((run->regs_mask[elm] & (1U << bit))) ? TagFreedSmall : TagLiveSmall;
+
+ *aInfo = { tag, addr, size };
}
-namespace Debug
+namespace Debug {
+// Helper for debuggers. We don't want it to be inlined and optimized out.
+MOZ_NEVER_INLINE jemalloc_ptr_info_t*
+jemalloc_ptr_info(const void* aPtr)
{
- // Helper for debuggers. We don't want it to be inlined and optimized out.
- MOZ_NEVER_INLINE jemalloc_ptr_info_t*
- jemalloc_ptr_info(const void* aPtr)
- {
- static jemalloc_ptr_info_t info;
- MozJemalloc::jemalloc_ptr_info(aPtr, &info);
- return &info;
- }
+ static jemalloc_ptr_info_t info;
+ MozJemalloc::jemalloc_ptr_info(aPtr, &info);
+ return &info;
+}
}
void
-arena_t::DallocSmall(arena_chunk_t* aChunk, void* aPtr, arena_chunk_map_t* aMapElm)
+arena_t::DallocSmall(arena_chunk_t* aChunk,
+ void* aPtr,
+ arena_chunk_map_t* aMapElm)
{
arena_run_t* run;
arena_bin_t* bin;
size_t size;
run = (arena_run_t*)(aMapElm->bits & ~pagesize_mask);
MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
bin = run->bin;
size = bin->reg_size;
MOZ_DIAGNOSTIC_ASSERT(uintptr_t(aPtr) >= uintptr_t(run) + bin->reg0_offset);
- MOZ_DIAGNOSTIC_ASSERT((uintptr_t(aPtr) - (uintptr_t(run) + bin->reg0_offset)) % size == 0);
+ MOZ_DIAGNOSTIC_ASSERT(
+ (uintptr_t(aPtr) - (uintptr_t(run) + bin->reg0_offset)) % size == 0);
memset(aPtr, kAllocPoison, size);
arena_run_reg_dalloc(run, bin, aPtr, size);
run->nfree++;
if (run->nfree == bin->nregs) {
// Deallocate run.
if (run == bin->runcur) {
bin->runcur = nullptr;
} else if (bin->nregs != 1) {
- size_t run_pageind = (uintptr_t(run) - uintptr_t(aChunk)) >> pagesize_2pow;
+ size_t run_pageind =
+ (uintptr_t(run) - uintptr_t(aChunk)) >> pagesize_2pow;
arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];
// This block's conditional is necessary because if the
// run only contains one region, then it never gets
// inserted into the non-full runs tree.
MOZ_DIAGNOSTIC_ASSERT(bin->runs.Search(run_mapelm) == run_mapelm);
bin->runs.Remove(run_mapelm);
}
@@ -3431,27 +3498,29 @@ arena_t::DallocSmall(arena_chunk_t* aChu
// Make sure that bin->runcur always refers to the lowest
// non-full run, if one exists.
if (!bin->runcur) {
bin->runcur = run;
} else if (uintptr_t(run) < uintptr_t(bin->runcur)) {
// Switch runcur.
if (bin->runcur->nfree > 0) {
arena_chunk_t* runcur_chunk = GetChunkForPtr(bin->runcur);
- size_t runcur_pageind = (uintptr_t(bin->runcur) - uintptr_t(runcur_chunk)) >> pagesize_2pow;
+ size_t runcur_pageind =
+ (uintptr_t(bin->runcur) - uintptr_t(runcur_chunk)) >> pagesize_2pow;
arena_chunk_map_t* runcur_mapelm = &runcur_chunk->map[runcur_pageind];
// Insert runcur.
MOZ_DIAGNOSTIC_ASSERT(!bin->runs.Search(runcur_mapelm));
bin->runs.Insert(runcur_mapelm);
}
bin->runcur = run;
} else {
- size_t run_pageind = (uintptr_t(run) - uintptr_t(aChunk)) >> pagesize_2pow;
- arena_chunk_map_t *run_mapelm = &aChunk->map[run_pageind];
+ size_t run_pageind =
+ (uintptr_t(run) - uintptr_t(aChunk)) >> pagesize_2pow;
+ arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];
MOZ_DIAGNOSTIC_ASSERT(bin->runs.Search(run_mapelm) == nullptr);
bin->runs.Insert(run_mapelm);
}
}
mStats.allocated_small -= size;
}
@@ -3470,17 +3539,17 @@ arena_t::DallocLarge(arena_chunk_t* aChu
static inline void
arena_dalloc(void* aPtr, size_t aOffset)
{
MOZ_ASSERT(aPtr);
MOZ_ASSERT(aOffset != 0);
MOZ_ASSERT(GetChunkOffsetForPtr(aPtr) == aOffset);
- auto chunk = (arena_chunk_t*) ((uintptr_t)aPtr - aOffset);
+ auto chunk = (arena_chunk_t*)((uintptr_t)aPtr - aOffset);
auto arena = chunk->arena;
MOZ_ASSERT(arena);
MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
MutexAutoLock lock(arena->mLock);
size_t pageind = aOffset >> pagesize_2pow;
arena_chunk_map_t* mapelm = &chunk->map[pageind];
MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
@@ -3489,74 +3558,78 @@ arena_dalloc(void* aPtr, size_t aOffset)
arena->DallocSmall(chunk, aPtr, mapelm);
} else {
// Large allocation.
arena->DallocLarge(chunk, aPtr);
}
}
static inline void
-idalloc(void *ptr)
+idalloc(void* ptr)
{
- size_t offset;
-
- MOZ_ASSERT(ptr);
-
- offset = GetChunkOffsetForPtr(ptr);
- if (offset != 0) {
- arena_dalloc(ptr, offset);
- } else {
- huge_dalloc(ptr);
- }
+ size_t offset;
+
+ MOZ_ASSERT(ptr);
+
+ offset = GetChunkOffsetForPtr(ptr);
+ if (offset != 0) {
+ arena_dalloc(ptr, offset);
+ } else {
+ huge_dalloc(ptr);
+ }
}
void
-arena_t::RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
+arena_t::RallocShrinkLarge(arena_chunk_t* aChunk,
+ void* aPtr,
+ size_t aSize,
size_t aOldSize)
{
MOZ_ASSERT(aSize < aOldSize);
// Shrink the run, and make trailing pages available for other
// allocations.
MutexAutoLock lock(mLock);
TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
mStats.allocated_large -= aOldSize - aSize;
}
// Returns whether reallocation was successful.
bool
-arena_t::RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
+arena_t::RallocGrowLarge(arena_chunk_t* aChunk,
+ void* aPtr,
+ size_t aSize,
size_t aOldSize)
{
size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
size_t npages = aOldSize >> pagesize_2pow;
MutexAutoLock lock(mLock);
- MOZ_DIAGNOSTIC_ASSERT(aOldSize == (aChunk->map[pageind].bits & ~pagesize_mask));
+ MOZ_DIAGNOSTIC_ASSERT(aOldSize ==
+ (aChunk->map[pageind].bits & ~pagesize_mask));
// Try to extend the run.
MOZ_ASSERT(aSize > aOldSize);
- if (pageind + npages < chunk_npages && (aChunk->map[pageind+npages].bits
- & CHUNK_MAP_ALLOCATED) == 0 && (aChunk->map[pageind+npages].bits &
- ~pagesize_mask) >= aSize - aOldSize) {
+ if (pageind + npages < chunk_npages &&
+ (aChunk->map[pageind + npages].bits & CHUNK_MAP_ALLOCATED) == 0 &&
+ (aChunk->map[pageind + npages].bits & ~pagesize_mask) >=
+ aSize - aOldSize) {
// The next run is available and sufficiently large. Split the
// following run, then merge the first part with the existing
// allocation.
if (!SplitRun((arena_run_t*)(uintptr_t(aChunk) +
((pageind + npages) << pagesize_2pow)),
aSize - aOldSize,
true,
false)) {
return false;
}
- aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED;
- aChunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED;
+ aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ aChunk->map[pageind + npages].bits = CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
mStats.allocated_large += aSize - aOldSize;
return true;
}
return false;
}
@@ -3600,23 +3673,23 @@ arena_ralloc(void* aPtr, size_t aSize, s
{
void* ret;
size_t copysize;
// Try to avoid moving the allocation.
if (aSize < small_min) {
if (aOldSize < small_min &&
ffs((int)(pow2_ceil(aSize) >> (TINY_MIN_2POW + 1))) ==
- ffs((int)(pow2_ceil(aOldSize) >> (TINY_MIN_2POW + 1)))) {
+ ffs((int)(pow2_ceil(aOldSize) >> (TINY_MIN_2POW + 1)))) {
goto IN_PLACE; // Same size class.
}
} else if (aSize <= small_max) {
if (aOldSize >= small_min && aOldSize <= small_max &&
(QUANTUM_CEILING(aSize) >> QUANTUM_2POW_MIN) ==
- (QUANTUM_CEILING(aOldSize) >> QUANTUM_2POW_MIN)) {
+ (QUANTUM_CEILING(aOldSize) >> QUANTUM_2POW_MIN)) {
goto IN_PLACE; // Same size class.
}
} else if (aSize <= bin_maxclass) {
if (aOldSize > small_max && aOldSize <= bin_maxclass &&
pow2_ceil(aSize) == pow2_ceil(aOldSize)) {
goto IN_PLACE; // Same size class.
}
} else if (aOldSize > bin_maxclass && aOldSize <= arena_maxclass) {
@@ -3644,19 +3717,19 @@ arena_ralloc(void* aPtr, size_t aSize, s
#endif
{
memcpy(ret, aPtr, copysize);
}
idalloc(aPtr);
return ret;
IN_PLACE:
if (aSize < aOldSize) {
- memset((void *)(uintptr_t(aPtr) + aSize), kAllocPoison, aOldSize - aSize);
+ memset((void*)(uintptr_t(aPtr) + aSize), kAllocPoison, aOldSize - aSize);
} else if (opt_zero && aSize > aOldSize) {
- memset((void *)(uintptr_t(aPtr) + aOldSize), 0, aSize - aOldSize);
+ memset((void*)(uintptr_t(aPtr) + aOldSize), 0, aSize - aOldSize);
}
return aPtr;
}
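// A minimal standalone sketch, not taken from this file: arena_ralloc() keeps
// the allocation in place whenever the old and new sizes round to the same
// size class. For the quantum-spaced classes that is the QUANTUM_CEILING
// comparison above; the 16-byte quantum below is an assumption, the real
// value is platform configuration.
#include <cstddef>

static bool
SameQuantumClass(size_t old_size, size_t new_size)
{
  const size_t kQuantum = 16;
  const size_t kQuantumMask = kQuantum - 1;
  size_t old_class = (old_size + kQuantumMask) & ~kQuantumMask;
  size_t new_class = (new_size + kQuantumMask) & ~kQuantumMask;
  return old_class == new_class; // e.g. 33 and 48 both map to the 48-byte class.
}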
static inline void*
iralloc(void* aPtr, size_t aSize, arena_t* aArena)
{
size_t oldsize;
@@ -3743,40 +3816,39 @@ arena_t::Init()
#if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
mMagic = ARENA_MAGIC;
#endif
return true;
}
-static inline arena_t *
+static inline arena_t*
arenas_fallback()
{
// Only reached if there is an OOM error.
// OOM here is quite inconvenient to propagate, since dealing with it
// would require a check for failure in the fast path. Instead, punt
// by using the first arena.
// In practice, this is an extremely unlikely failure.
- _malloc_message(_getprogname(),
- ": (malloc) Error initializing arena\n");
+ _malloc_message(_getprogname(), ": (malloc) Error initializing arena\n");
return gMainArena;
}
// Create a new arena and return it.
static arena_t*
arenas_extend()
{
arena_t* ret;
// Allocate enough space for trailing bins.
- ret = (arena_t*)base_alloc(sizeof(arena_t) +
- (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
+ ret = (arena_t*)base_alloc(
+ sizeof(arena_t) + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
if (!ret || !ret->Init()) {
return arenas_fallback();
}
MutexAutoLock lock(arenas_lock);
// TODO: Use random Ids.
ret->mId = narenas++;
@@ -3784,20 +3856,20 @@ arenas_extend()
return ret;
}
// End arena.
// ***************************************************************************
// Begin general internal functions.
-static void *
+static void*
huge_malloc(size_t size, bool zero)
{
- return huge_palloc(size, chunksize, zero);
+ return huge_palloc(size, chunksize, zero);
}
static void*
huge_palloc(size_t aSize, size_t aAlignment, bool aZero)
{
void* ret;
size_t csize;
size_t psize;
@@ -3860,27 +3932,27 @@ huge_palloc(size_t aSize, size_t aAlignm
#ifdef MALLOC_DECOMMIT
if (csize - psize > 0) {
pages_decommit((void*)((uintptr_t)ret + psize), csize - psize);
}
#endif
if (aZero == false) {
if (opt_junk) {
-# ifdef MALLOC_DECOMMIT
+#ifdef MALLOC_DECOMMIT
memset(ret, kAllocJunk, psize);
-# else
+#else
memset(ret, kAllocJunk, csize);
-# endif
+#endif
} else if (opt_zero) {
-# ifdef MALLOC_DECOMMIT
+#ifdef MALLOC_DECOMMIT
memset(ret, 0, psize);
-# else
+#else
memset(ret, 0, csize);
-# endif
+#endif
}
}
return ret;
}
static void*
huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize)
@@ -3988,27 +4060,27 @@ huge_dalloc(void* aPtr)
base_node_dealloc(node);
}
// FreeBSD's pthreads implementation calls malloc(3), so the malloc
// implementation has to take pains to avoid infinite recursion during
// initialization.
#if defined(XP_WIN)
-#define malloc_init() false
+#define malloc_init() false
#else
static inline bool
malloc_init(void)
{
- if (malloc_initialized == false) {
- return malloc_init_hard();
- }
-
- return false;
+ if (malloc_initialized == false) {
+ return malloc_init_hard();
+ }
+
+ return false;
}
#endif
static size_t
GetKernelPageSize()
{
static size_t kernel_page_size = ([]() {
#ifdef XP_WIN
@@ -4022,21 +4094,21 @@ GetKernelPageSize()
#endif
})();
return kernel_page_size;
}
#if !defined(XP_WIN)
static
#endif
-bool
-malloc_init_hard(void)
+ bool
+ malloc_init_hard(void)
{
unsigned i;
- const char *opts;
+ const char* opts;
long result;
#ifndef XP_WIN
MutexAutoLock lock(gInitLock);
#endif
if (malloc_initialized) {
// Another thread initialized the allocator before this one
@@ -4048,89 +4120,98 @@ malloc_init_hard(void)
return false;
}
// Get page size and number of CPUs
result = GetKernelPageSize();
// We assume that the page size is a power of 2.
MOZ_ASSERT(((result - 1) & result) == 0);
#ifdef MALLOC_STATIC_PAGESIZE
- if (pagesize % (size_t) result) {
- _malloc_message(_getprogname(),
- "Compile-time page size does not divide the runtime one.\n");
+ if (pagesize % (size_t)result) {
+ _malloc_message(
+ _getprogname(),
+ "Compile-time page size does not divide the runtime one.\n");
MOZ_CRASH();
}
#else
- pagesize = (size_t) result;
- pagesize_mask = (size_t) result - 1;
+ pagesize = (size_t)result;
+ pagesize_mask = (size_t)result - 1;
pagesize_2pow = ffs((int)result) - 1;
#endif
// Get runtime configuration.
if ((opts = getenv("MALLOC_OPTIONS"))) {
for (i = 0; opts[i] != '\0'; i++) {
unsigned j, nreps;
bool nseen;
// Parse repetition count, if any.
for (nreps = 0, nseen = false;; i++, nseen = true) {
switch (opts[i]) {
- case '0': case '1': case '2': case '3':
- case '4': case '5': case '6': case '7':
- case '8': case '9':
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
nreps *= 10;
nreps += opts[i] - '0';
break;
default:
goto MALLOC_OUT;
}
}
-MALLOC_OUT:
+ MALLOC_OUT:
if (nseen == false) {
nreps = 1;
}
for (j = 0; j < nreps; j++) {
switch (opts[i]) {
- case 'f':
- opt_dirty_max >>= 1;
- break;
- case 'F':
- if (opt_dirty_max == 0) {
- opt_dirty_max = 1;
- } else if ((opt_dirty_max << 1) != 0) {
- opt_dirty_max <<= 1;
- }
- break;
+ case 'f':
+ opt_dirty_max >>= 1;
+ break;
+ case 'F':
+ if (opt_dirty_max == 0) {
+ opt_dirty_max = 1;
+ } else if ((opt_dirty_max << 1) != 0) {
+ opt_dirty_max <<= 1;
+ }
+ break;
#ifdef MOZ_DEBUG
- case 'j':
- opt_junk = false;
- break;
- case 'J':
- opt_junk = true;
- break;
+ case 'j':
+ opt_junk = false;
+ break;
+ case 'J':
+ opt_junk = true;
+ break;
#endif
#ifdef MOZ_DEBUG
- case 'z':
- opt_zero = false;
- break;
- case 'Z':
- opt_zero = true;
- break;
+ case 'z':
+ opt_zero = false;
+ break;
+ case 'Z':
+ opt_zero = true;
+ break;
#endif
- default: {
- char cbuf[2];
-
- cbuf[0] = opts[i];
- cbuf[1] = '\0';
- _malloc_message(_getprogname(),
- ": (malloc) Unsupported character "
- "in malloc options: '", cbuf,
- "'\n");
- }
+ default: {
+ char cbuf[2];
+
+ cbuf[0] = opts[i];
+ cbuf[1] = '\0';
+ _malloc_message(_getprogname(),
+ ": (malloc) Unsupported character "
+ "in malloc options: '",
+ cbuf,
+ "'\n");
+ }
}
}
}
}
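// A minimal standalone sketch, not taken from this file: the MALLOC_OPTIONS
// loop above reads an optional decimal repetition count and then applies the
// following letter that many times; 'f' halves the dirty page limit and 'F'
// doubles it, saturating. The parser below handles only those two letters and
// starts from a hypothetical default of 1024.
#include <cstddef>

static size_t
ParseDirtyMax(const char* opts)
{
  size_t dirty_max = 1024;
  for (size_t i = 0; opts[i] != '\0'; i++) {
    unsigned nreps = 0;
    bool nseen = false;
    while (opts[i] >= '0' && opts[i] <= '9') { // Optional repetition count.
      nreps = nreps * 10 + (unsigned)(opts[i] - '0');
      nseen = true;
      i++;
    }
    if (opts[i] == '\0') {
      break; // Trailing count with no option letter.
    }
    if (!nseen) {
      nreps = 1;
    }
    for (unsigned j = 0; j < nreps; j++) {
      switch (opts[i]) {
        case 'f':
          dirty_max >>= 1;
          break;
        case 'F':
          if (dirty_max == 0) {
            dirty_max = 1;
          } else if ((dirty_max << 1) != 0) {
            dirty_max <<= 1;
          }
          break;
        default:
          break; // Other letters are ignored in this sketch.
      }
    }
  }
  return dirty_max; // ParseDirtyMax("2F") == 4096.
}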
#ifndef MALLOC_STATIC_PAGESIZE
// Set bin-related variables.
bin_maxclass = (pagesize >> 1);
@@ -4140,17 +4221,17 @@ MALLOC_OUT:
arena_chunk_header_npages = calculate_arena_header_pages();
arena_maxclass = calculate_arena_maxclass();
#endif
gRecycledSize = 0;
// Various sanity checks that regard configuration.
- MOZ_ASSERT(quantum >= sizeof(void *));
+ MOZ_ASSERT(quantum >= sizeof(void*));
MOZ_ASSERT(quantum <= pagesize);
MOZ_ASSERT(chunksize >= pagesize);
MOZ_ASSERT(quantum * 4 <= chunksize);
// Initialize chunks data.
chunks_mtx.Init();
gChunksBySize.Init();
gChunksByAddress.Init();
@@ -4189,48 +4270,53 @@ MALLOC_OUT:
malloc_initialized = true;
// Dummy call so that the function is not removed by dead-code elimination
Debug::jemalloc_ptr_info(nullptr);
#if !defined(XP_WIN) && !defined(XP_DARWIN)
// Prevent potential deadlock on malloc locks after fork.
- pthread_atfork(_malloc_prefork, _malloc_postfork_parent, _malloc_postfork_child);
+ pthread_atfork(
+ _malloc_prefork, _malloc_postfork_parent, _malloc_postfork_child);
#endif
return false;
}
// End general internal functions.
// ***************************************************************************
// Begin malloc(3)-compatible functions.
// The BaseAllocator class is a helper class that implements the base allocator
// functions (malloc, calloc, realloc, free, memalign) for a given arena,
// or an appropriately chosen arena (per choose_arena()) when none is given.
-struct BaseAllocator {
-#define MALLOC_DECL(name, return_type, ...) \
+struct BaseAllocator
+{
+#define MALLOC_DECL(name, return_type, ...) \
inline return_type name(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
- explicit BaseAllocator(arena_t* aArena) : mArena(aArena) { }
+ explicit BaseAllocator(arena_t* aArena)
+ : mArena(aArena)
+ {
+ }
private:
arena_t* mArena;
};
-#define MALLOC_DECL(name, return_type, ...) \
- template<> inline return_type \
- MozJemalloc::name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
- { \
- BaseAllocator allocator(nullptr); \
- return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
+#define MALLOC_DECL(name, return_type, ...) \
+ template<> \
+ inline return_type MozJemalloc::name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
+ { \
+ BaseAllocator allocator(nullptr); \
+ return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
inline void*
BaseAllocator::malloc(size_t aSize)
{
void* ret;
@@ -4273,32 +4359,32 @@ BaseAllocator::memalign(size_t aAlignmen
ret = ipalloc(aAlignment, aSize, mArena);
return ret;
}
inline void*
BaseAllocator::calloc(size_t aNum, size_t aSize)
{
- void *ret;
+ void* ret;
if (malloc_init()) {
ret = nullptr;
goto RETURN;
}
size_t num_size = aNum * aSize;
if (num_size == 0) {
num_size = 1;
- // Try to avoid division here. We know that it isn't possible to
- // overflow during multiplication if neither operand uses any of the
- // most significant half of the bits in a size_t.
- } else if (((aNum | aSize) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
- && (num_size / aSize != aNum)) {
+ // Try to avoid division here. We know that it isn't possible to
+ // overflow during multiplication if neither operand uses any of the
+ // most significant half of the bits in a size_t.
+ } else if (((aNum | aSize) & (SIZE_T_MAX << (sizeof(size_t) << 2))) &&
+ (num_size / aSize != aNum)) {
// size_t overflow.
ret = nullptr;
goto RETURN;
}
ret = imalloc(num_size, /* zero = */ true, mArena);
RETURN:
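// A minimal standalone sketch, not taken from this file: the overflow test in
// calloc() above relies on the fact that if neither factor uses the top half
// of the bits in a size_t, their product cannot overflow, so the division
// check only runs when that cheap mask test fires.
#include <cstddef>
#include <cstdint>

static bool
MulWouldOverflow(size_t a, size_t b)
{
  const size_t kHighHalf = SIZE_MAX << (sizeof(size_t) << 2); // Top half of the bits.
  if (((a | b) & kHighHalf) == 0) {
    return false; // Both factors fit in half the bits, so a * b fits.
  }
  return b != 0 && (a * b) / b != a; // Fall back to the division check.
}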
@@ -4353,18 +4439,19 @@ BaseAllocator::free(void* aPtr)
} else if (aPtr) {
huge_dalloc(aPtr);
}
}
template<void* (*memalign)(size_t, size_t)>
struct AlignedAllocator
{
- static inline int
- posix_memalign(void** aMemPtr, size_t aAlignment, size_t aSize)
+ static inline int posix_memalign(void** aMemPtr,
+ size_t aAlignment,
+ size_t aSize)
{
void* result;
// alignment must be a power of two and a multiple of sizeof(void*)
if (((aAlignment - 1) & aAlignment) != 0 || aAlignment < sizeof(void*)) {
return EINVAL;
}
@@ -4374,56 +4461,58 @@ struct AlignedAllocator
if (!result) {
return ENOMEM;
}
*aMemPtr = result;
return 0;
}
- static inline void*
- aligned_alloc(size_t aAlignment, size_t aSize)
+ static inline void* aligned_alloc(size_t aAlignment, size_t aSize)
{
if (aSize % aAlignment) {
return nullptr;
}
return memalign(aAlignment, aSize);
}
- static inline void*
- valloc(size_t aSize)
+ static inline void* valloc(size_t aSize)
{
return memalign(GetKernelPageSize(), aSize);
}
};
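// A minimal standalone sketch, not taken from this file: posix_memalign()
// above rejects alignments that are not a power of two or are smaller than
// sizeof(void*); a power of two that is at least sizeof(void*) is
// automatically a multiple of it.
#include <cstddef>

static bool
IsValidPosixMemalignAlignment(size_t alignment)
{
  return ((alignment - 1) & alignment) == 0 && alignment >= sizeof(void*);
}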
-template<> inline int
+template<>
+inline int
MozJemalloc::posix_memalign(void** aMemPtr, size_t aAlignment, size_t aSize)
{
return AlignedAllocator<memalign>::posix_memalign(aMemPtr, aAlignment, aSize);
}
-template<> inline void*
+template<>
+inline void*
MozJemalloc::aligned_alloc(size_t aAlignment, size_t aSize)
{
return AlignedAllocator<memalign>::aligned_alloc(aAlignment, aSize);
}
-template<> inline void*
+template<>
+inline void*
MozJemalloc::valloc(size_t aSize)
{
return AlignedAllocator<memalign>::valloc(aSize);
}
// End malloc(3)-compatible functions.
// ***************************************************************************
// Begin non-standard functions.
// This was added by Mozilla for use by SQLite.
-template<> inline size_t
+template<>
+inline size_t
MozJemalloc::malloc_good_size(size_t aSize)
{
// This duplicates the logic in imalloc(), arena_malloc() and
// arena_t::MallocSmall().
if (aSize < small_min) {
// Small (tiny).
aSize = pow2_ceil(aSize);
@@ -4447,24 +4536,25 @@ MozJemalloc::malloc_good_size(size_t aSi
// CHUNK_CEILING to get csize. This ensures that this
// malloc_usable_size(malloc(n)) always matches
// malloc_good_size(n).
aSize = PAGE_CEILING(aSize);
}
return aSize;
}
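// A minimal standalone sketch, not taken from this file: malloc_good_size()
// rounds tiny and sub-page requests to the next power of two, quantum-spaced
// requests to the next quantum multiple, and large requests to the next page
// multiple. The class boundaries are configuration-dependent, so only the
// rounding helpers are shown; a 16-byte quantum and 4 KiB page are assumed in
// the checks.
#include <cassert>
#include <cstddef>

static size_t
Pow2Ceil(size_t x)
{
  size_t p = 1;
  while (p < x) {
    p <<= 1;
  }
  return p;
}

static size_t
RoundUp(size_t x, size_t step) // step must be a power of two.
{
  return (x + step - 1) & ~(step - 1);
}

static void
DemoGoodSize()
{
  assert(Pow2Ceil(3) == 4);            // Tiny request -> next power of two.
  assert(RoundUp(33, 16) == 48);       // Quantum-spaced request.
  assert(RoundUp(5000, 4096) == 8192); // Large request -> next page multiple.
}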
-
-template<> inline size_t
+template<>
+inline size_t
MozJemalloc::malloc_usable_size(usable_ptr_t aPtr)
{
return isalloc_validate(aPtr);
}
-template<> inline void
+template<>
+inline void
MozJemalloc::jemalloc_stats(jemalloc_stats_t* aStats)
{
size_t non_arena_mapped, chunk_header_size;
MOZ_ASSERT(aStats);
// Gather runtime settings.
aStats->opt_junk = opt_junk;
@@ -4502,17 +4592,17 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
aStats->bookkeeping += base_committed;
MOZ_ASSERT(base_mapped >= base_committed);
}
arenas_lock.Lock();
// Iterate over arenas.
for (auto arena : gArenaTree.iter()) {
size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
- arena_unused, arena_headers;
+ arena_unused, arena_headers;
arena_run_t* run;
if (!arena) {
continue;
}
arena_headers = 0;
arena_unused = 0;
@@ -4520,18 +4610,18 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
{
MutexAutoLock lock(arena->mLock);
arena_mapped = arena->mStats.mapped;
// "committed" counts dirty and allocated memory.
arena_committed = arena->mStats.committed << pagesize_2pow;
- arena_allocated = arena->mStats.allocated_small +
- arena->mStats.allocated_large;
+ arena_allocated =
+ arena->mStats.allocated_small + arena->mStats.allocated_large;
arena_dirty = arena->mNumDirty << pagesize_2pow;
for (j = 0; j < ntbins + nqbins + nsbins; j++) {
arena_bin_t* bin = &arena->mBins[j];
size_t bin_unused = 0;
for (auto mapelm : bin->runs.iter()) {
@@ -4551,34 +4641,34 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
MOZ_ASSERT(arena_mapped >= arena_committed);
MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);
// "waste" is committed memory that is neither dirty nor
// allocated.
aStats->mapped += arena_mapped;
aStats->allocated += arena_allocated;
aStats->page_cache += arena_dirty;
- aStats->waste += arena_committed -
- arena_allocated - arena_dirty - arena_unused - arena_headers;
+ aStats->waste += arena_committed - arena_allocated - arena_dirty -
+ arena_unused - arena_headers;
aStats->bin_unused += arena_unused;
aStats->bookkeeping += arena_headers;
}
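// Put differently, each arena satisfies the identity
//   committed == allocated + dirty + bin_unused + headers + waste
// so waste is whatever committed memory the other categories do not explain.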
arenas_lock.Unlock();
// Account for arena chunk headers in bookkeeping rather than waste.
chunk_header_size =
- ((aStats->mapped / aStats->chunksize) * arena_chunk_header_npages) <<
- pagesize_2pow;
+ ((aStats->mapped / aStats->chunksize) * arena_chunk_header_npages)
+ << pagesize_2pow;
aStats->mapped += non_arena_mapped;
aStats->bookkeeping += chunk_header_size;
aStats->waste -= chunk_header_size;
MOZ_ASSERT(aStats->mapped >= aStats->allocated + aStats->waste +
- aStats->page_cache + aStats->bookkeeping);
+ aStats->page_cache + aStats->bookkeeping);
}
#ifdef MALLOC_DOUBLE_PURGE
// Explicitly remove all of this chunk's MADV_FREE'd pages from memory.
static void
hard_purge_chunk(arena_chunk_t* aChunk)
{
@@ -4615,37 +4705,39 @@ arena_t::HardPurge()
MutexAutoLock lock(mLock);
while (!mChunksMAdvised.isEmpty()) {
arena_chunk_t* chunk = mChunksMAdvised.popFront();
hard_purge_chunk(chunk);
}
}
-template<> inline void
+template<>
+inline void
MozJemalloc::jemalloc_purge_freed_pages()
{
MutexAutoLock lock(arenas_lock);
for (auto arena : gArenaTree.iter()) {
arena->HardPurge();
}
}
#else // !defined MALLOC_DOUBLE_PURGE
-template<> inline void
+template<>
+inline void
MozJemalloc::jemalloc_purge_freed_pages()
{
// Do nothing.
}
#endif // defined MALLOC_DOUBLE_PURGE
-
-template<> inline void
+template<>
+inline void
MozJemalloc::jemalloc_free_dirty_pages(void)
{
MutexAutoLock lock(arenas_lock);
for (auto arena : gArenaTree.iter()) {
MutexAutoLock arena_lock(arena->mLock);
arena->Purge(true);
}
}
@@ -4657,103 +4749,107 @@ arena_t::GetById(arena_id_t aArenaId)
key.mId = aArenaId;
MutexAutoLock lock(arenas_lock);
arena_t* result = gArenaTree.Search(&key);
MOZ_RELEASE_ASSERT(result);
return result;
}
#ifdef NIGHTLY_BUILD
-template<> inline arena_id_t
+template<>
+inline arena_id_t
MozJemalloc::moz_create_arena()
{
arena_t* arena = arenas_extend();
return arena->mId;
}
-template<> inline void
+template<>
+inline void
MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
{
arena_t* arena = arena_t::GetById(aArenaId);
MutexAutoLock lock(arenas_lock);
gArenaTree.Remove(arena);
// The arena is leaked, and remaining allocations in it are still alive
// until they are freed. After that, the arena will be empty but will still
// take up at least a chunk of address space. TODO: bug 1364359.
}
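// A minimal usage sketch: a caller holds on to the id returned by
// moz_create_arena(), routes related allocations through the moz_arena_malloc
// / moz_arena_free wrappers generated below so that they share one arena, and
// calls moz_dispose_arena(id) once the subsystem is done with it.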
-#define MALLOC_DECL(name, return_type, ...) \
- template<> inline return_type \
- MozJemalloc::moz_arena_ ## name(arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
- { \
- BaseAllocator allocator(arena_t::GetById(aArenaId)); \
- return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
+#define MALLOC_DECL(name, return_type, ...) \
+ template<> \
+ inline return_type MozJemalloc::moz_arena_##name( \
+ arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
+ { \
+ BaseAllocator allocator(arena_t::GetById(aArenaId)); \
+ return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
#else
-#define MALLOC_DECL(name, return_type, ...) \
- template<> inline return_type \
- MozJemalloc::name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
- { \
- return DummyArenaAllocator<MozJemalloc>::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
+#define MALLOC_DECL(name, return_type, ...) \
+ template<> \
+ inline return_type MozJemalloc::name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
+ { \
+ return DummyArenaAllocator<MozJemalloc>::name( \
+ ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#define MALLOC_FUNCS MALLOC_FUNCS_ARENA
#include "malloc_decls.h"
#endif
// End non-standard functions.
// ***************************************************************************
// Begin library-private functions, used by threading libraries for protection
// of malloc during fork(). These functions are only called if the program is
// running in threaded mode, so there is no need to check whether the program
// is threaded here.
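// (As a sketch of how these are wired up: on POSIX platforms they would
// typically be registered with pthread_atfork(_malloc_prefork,
// _malloc_postfork_parent, _malloc_postfork_child), so the parent releases
// the locks after fork() and the child reinitializes them.)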
#ifndef XP_DARWIN
static
#endif
-void
-_malloc_prefork(void)
+ void
+ _malloc_prefork(void)
{
// Acquire all mutexes in a safe order.
arenas_lock.Lock();
for (auto arena : gArenaTree.iter()) {
arena->mLock.Lock();
}
base_mtx.Lock();
huge_mtx.Lock();
}
#ifndef XP_DARWIN
static
#endif
-void
-_malloc_postfork_parent(void)
+ void
+ _malloc_postfork_parent(void)
{
// Release all mutexes, now that fork() has completed.
huge_mtx.Unlock();
base_mtx.Unlock();
for (auto arena : gArenaTree.iter()) {
arena->mLock.Unlock();
}
arenas_lock.Unlock();
}
#ifndef XP_DARWIN
static
#endif
-void
-_malloc_postfork_child(void)
+ void
+ _malloc_postfork_child(void)
{
// Reinitialize all mutexes, now that fork() has completed.
huge_mtx.Init();
base_mtx.Init();
for (auto arena : gArenaTree.iter()) {
arena->mLock.Init();
@@ -4768,117 +4864,118 @@ void
// LD_PRELOAD or DYLD_INSERT_LIBRARIES on Linux/OSX. On this platform,
// the replacement functions are defined as variable pointers to the
// function resolved with GetProcAddress() instead of weak definitions
// of functions. On Android, the same needs to happen as well, because
// the Android linker doesn't handle weak linking with non-LD_PRELOADed
// libraries, but LD_PRELOADing is not very convenient on Android because
// of the zygote.
#ifdef XP_DARWIN
-# define MOZ_REPLACE_WEAK __attribute__((weak_import))
+#define MOZ_REPLACE_WEAK __attribute__((weak_import))
#elif defined(XP_WIN) || defined(MOZ_WIDGET_ANDROID)
-# define MOZ_NO_REPLACE_FUNC_DECL
+#define MOZ_NO_REPLACE_FUNC_DECL
#elif defined(__GNUC__)
-# define MOZ_REPLACE_WEAK __attribute__((weak))
+#define MOZ_REPLACE_WEAK __attribute__((weak))
#endif
#include "replace_malloc.h"
-#define MALLOC_DECL(name, return_type, ...) \
- MozJemalloc::name,
+#define MALLOC_DECL(name, return_type, ...) MozJemalloc::name,
static const malloc_table_t malloc_table = {
#include "malloc_decls.h"
};
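// With the MALLOC_DECL above, the table is just an aggregate initializer
// listing the MozJemalloc entry points in whatever order malloc_decls.h
// declares them, roughly { MozJemalloc::malloc, MozJemalloc::calloc, ... }.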
static malloc_table_t replace_malloc_table;
#ifdef MOZ_NO_REPLACE_FUNC_DECL
-# define MALLOC_DECL(name, return_type, ...) \
- typedef return_type (name##_impl_t)(__VA_ARGS__); \
- name##_impl_t* replace_##name = nullptr;
-# define MALLOC_FUNCS (MALLOC_FUNCS_INIT | MALLOC_FUNCS_BRIDGE)
-# include "malloc_decls.h"
+#define MALLOC_DECL(name, return_type, ...) \
+ typedef return_type(name##_impl_t)(__VA_ARGS__); \
+ name##_impl_t* replace_##name = nullptr;
+#define MALLOC_FUNCS (MALLOC_FUNCS_INIT | MALLOC_FUNCS_BRIDGE)
+#include "malloc_decls.h"
#endif
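// For reference, each MALLOC_DECL above expands to a typedef of the function
// signature plus a null pointer definition; for name=init that is roughly
//   typedef void(init_impl_t)(const malloc_table_t*);
//   init_impl_t* replace_init = nullptr;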
#ifdef XP_WIN
typedef HMODULE replace_malloc_handle_t;
static replace_malloc_handle_t
replace_malloc_handle()
{
char replace_malloc_lib[1024];
- if (GetEnvironmentVariableA("MOZ_REPLACE_MALLOC_LIB", replace_malloc_lib,
+ if (GetEnvironmentVariableA("MOZ_REPLACE_MALLOC_LIB",
+ replace_malloc_lib,
sizeof(replace_malloc_lib)) > 0) {
return LoadLibraryA(replace_malloc_lib);
}
return nullptr;
}
-# define REPLACE_MALLOC_GET_FUNC(handle, name) \
- (name##_impl_t*) GetProcAddress(handle, "replace_" # name)
+#define REPLACE_MALLOC_GET_FUNC(handle, name) \
+ (name##_impl_t*)GetProcAddress(handle, "replace_" #name)
#elif defined(ANDROID)
-# include <dlfcn.h>
+#include <dlfcn.h>
typedef void* replace_malloc_handle_t;
static replace_malloc_handle_t
replace_malloc_handle()
{
- const char *replace_malloc_lib = getenv("MOZ_REPLACE_MALLOC_LIB");
+ const char* replace_malloc_lib = getenv("MOZ_REPLACE_MALLOC_LIB");
if (replace_malloc_lib && *replace_malloc_lib) {
return dlopen(replace_malloc_lib, RTLD_LAZY);
}
return nullptr;
}
-# define REPLACE_MALLOC_GET_FUNC(handle, name) \
- (name##_impl_t*) dlsym(handle, "replace_" # name)
+#define REPLACE_MALLOC_GET_FUNC(handle, name) \
+ (name##_impl_t*)dlsym(handle, "replace_" #name)
#else
typedef bool replace_malloc_handle_t;
static replace_malloc_handle_t
replace_malloc_handle()
{
return true;
}
-# define REPLACE_MALLOC_GET_FUNC(handle, name) \
- replace_##name
+#define REPLACE_MALLOC_GET_FUNC(handle, name) replace_##name
#endif
-static void replace_malloc_init_funcs();
+static void
+replace_malloc_init_funcs();
// Below is the malloc implementation overriding jemalloc and calling the
// replacement functions if they exist.
static int replace_malloc_initialized = 0;
static void
init()
{
replace_malloc_init_funcs();
// Set this *before* calling replace_init, otherwise if replace_init calls
// malloc() we'll get an infinite loop.
replace_malloc_initialized = 1;
if (replace_init) {
replace_init(&malloc_table);
}
}
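// Without that ordering the sequence would be: malloc() ->
// ReplaceMalloc::malloc() sees the flag unset -> init() -> replace_init() ->
// malloc() -> init() -> ... with no way to terminate.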
-#define MALLOC_DECL(name, return_type, ...) \
- template<> inline return_type \
- ReplaceMalloc::name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
- { \
- if (MOZ_UNLIKELY(!replace_malloc_initialized)) { \
- init(); \
- } \
- return replace_malloc_table.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
+#define MALLOC_DECL(name, return_type, ...) \
+ template<> \
+ inline return_type ReplaceMalloc::name( \
+ ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
+ { \
+ if (MOZ_UNLIKELY(!replace_malloc_initialized)) { \
+ init(); \
+ } \
+ return replace_malloc_table.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#include "malloc_decls.h"
MOZ_JEMALLOC_API struct ReplaceMallocBridge*
get_bridge(void)
{
if (MOZ_UNLIKELY(!replace_malloc_initialized)) {
init();
@@ -4895,79 +4992,84 @@ get_bridge(void)
// replace_valloc, and default implementations will be automatically derived
// from replace_memalign.
static void
replace_malloc_init_funcs()
{
replace_malloc_handle_t handle = replace_malloc_handle();
if (handle) {
#ifdef MOZ_NO_REPLACE_FUNC_DECL
-# define MALLOC_DECL(name, ...) \
- replace_##name = REPLACE_MALLOC_GET_FUNC(handle, name);
-
-# define MALLOC_FUNCS (MALLOC_FUNCS_INIT | MALLOC_FUNCS_BRIDGE)
-# include "malloc_decls.h"
+#define MALLOC_DECL(name, ...) \
+ replace_##name = REPLACE_MALLOC_GET_FUNC(handle, name);
+
+#define MALLOC_FUNCS (MALLOC_FUNCS_INIT | MALLOC_FUNCS_BRIDGE)
+#include "malloc_decls.h"
#endif
-#define MALLOC_DECL(name, ...) \
+#define MALLOC_DECL(name, ...) \
replace_malloc_table.name = REPLACE_MALLOC_GET_FUNC(handle, name);
#include "malloc_decls.h"
}
if (!replace_malloc_table.posix_memalign && replace_malloc_table.memalign) {
- replace_malloc_table.posix_memalign = AlignedAllocator<ReplaceMalloc::memalign>::posix_memalign;
+ replace_malloc_table.posix_memalign =
+ AlignedAllocator<ReplaceMalloc::memalign>::posix_memalign;
}
if (!replace_malloc_table.aligned_alloc && replace_malloc_table.memalign) {
- replace_malloc_table.aligned_alloc = AlignedAllocator<ReplaceMalloc::memalign>::aligned_alloc;
+ replace_malloc_table.aligned_alloc =
+ AlignedAllocator<ReplaceMalloc::memalign>::aligned_alloc;
}
if (!replace_malloc_table.valloc && replace_malloc_table.memalign) {
- replace_malloc_table.valloc = AlignedAllocator<ReplaceMalloc::memalign>::valloc;
+ replace_malloc_table.valloc =
+ AlignedAllocator<ReplaceMalloc::memalign>::valloc;
}
if (!replace_malloc_table.moz_create_arena && replace_malloc_table.malloc) {
-#define MALLOC_DECL(name, ...) \
- replace_malloc_table.name = DummyArenaAllocator<ReplaceMalloc>::name;
+#define MALLOC_DECL(name, ...) \
+ replace_malloc_table.name = DummyArenaAllocator<ReplaceMalloc>::name;
#define MALLOC_FUNCS MALLOC_FUNCS_ARENA
#include "malloc_decls.h"
}
-#define MALLOC_DECL(name, ...) \
- if (!replace_malloc_table.name) { \
- replace_malloc_table.name = MozJemalloc::name; \
+#define MALLOC_DECL(name, ...) \
+ if (!replace_malloc_table.name) { \
+ replace_malloc_table.name = MozJemalloc::name; \
}
#include "malloc_decls.h"
}
#endif // MOZ_REPLACE_MALLOC
-// ***************************************************************************
-// Definition of all the _impl functions
-
-#define GENERIC_MALLOC_DECL2(name, name_impl, return_type, ...) \
- return_type name_impl(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
- { \
- return DefaultMalloc::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
- }
-
-#define GENERIC_MALLOC_DECL(name, return_type, ...) \
+ // ***************************************************************************
+ // Definition of all the _impl functions
+
+#define GENERIC_MALLOC_DECL2(name, name_impl, return_type, ...) \
+ return_type name_impl(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
+ { \
+ return DefaultMalloc::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
+ }
+
+#define GENERIC_MALLOC_DECL(name, return_type, ...) \
GENERIC_MALLOC_DECL2(name, name##_impl, return_type, ##__VA_ARGS__)
-#define MALLOC_DECL(...) MOZ_MEMORY_API MACRO_CALL(GENERIC_MALLOC_DECL, (__VA_ARGS__))
+#define MALLOC_DECL(...) \
+ MOZ_MEMORY_API MACRO_CALL(GENERIC_MALLOC_DECL, (__VA_ARGS__))
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
#include "malloc_decls.h"
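// As a concrete example, for malloc the two macros above expand to roughly
//   MOZ_MEMORY_API void* malloc_impl(size_t arg) {
//     return DefaultMalloc::malloc(arg);
//   }
// i.e. every exported *_impl symbol simply forwards to DefaultMalloc.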
#undef GENERIC_MALLOC_DECL
-#define GENERIC_MALLOC_DECL(name, return_type, ...) \
+#define GENERIC_MALLOC_DECL(name, return_type, ...) \
GENERIC_MALLOC_DECL2(name, name, return_type, ##__VA_ARGS__)
-#define MALLOC_DECL(...) MOZ_JEMALLOC_API MACRO_CALL(GENERIC_MALLOC_DECL, (__VA_ARGS__))
+#define MALLOC_DECL(...) \
+ MOZ_JEMALLOC_API MACRO_CALL(GENERIC_MALLOC_DECL, (__VA_ARGS__))
#define MALLOC_FUNCS (MALLOC_FUNCS_JEMALLOC | MALLOC_FUNCS_ARENA)
#include "malloc_decls.h"
-// ***************************************************************************
+ // ***************************************************************************
#ifdef HAVE_DLOPEN
-# include <dlfcn.h>
+#include <dlfcn.h>
#endif
#if defined(__GLIBC__) && !defined(__UCLIBC__)
// glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
// to inconsistently reference libc's malloc(3)-compatible functions
// (bug 493541).
//
// These definitions interpose hooks in glibc. The functions are actually
@@ -4980,17 +5082,17 @@ MOZ_EXPORT void* (*__malloc_hook)(size_t
MOZ_EXPORT void* (*__realloc_hook)(void*, size_t) = realloc_impl;
MOZ_EXPORT void* (*__memalign_hook)(size_t, size_t) = memalign_impl;
}
#elif defined(RTLD_DEEPBIND)
// XXX On systems that support RTLD_GROUP or DF_1_GROUP, do their
// implementations permit similar inconsistencies? Should STV_SINGLETON
// visibility be used for interposition where available?
-# error "Interposing malloc is unsafe on this system without libc malloc hooks."
+#error "Interposing malloc is unsafe on this system without libc malloc hooks."
#endif
#ifdef XP_WIN
void*
_recalloc(void* aPtr, size_t aCount, size_t aSize)
{
size_t oldsize = aPtr ? isalloc(aPtr) : 0;
size_t newsize = aCount * aSize;
@@ -5025,29 +5127,27 @@ size_t
{
return DefaultMalloc::malloc_usable_size(aPtr);
}
// In the new-style jemalloc integration, jemalloc is built as a separate
// shared library. Since we're no longer hooking into the CRT binary,
// we need to initialize the heap at the first opportunity we get.
// DLL_PROCESS_ATTACH in DllMain is that opportunity.
-BOOL APIENTRY DllMain(HINSTANCE hModule,
- DWORD reason,
- LPVOID lpReserved)
+BOOL APIENTRY
+DllMain(HINSTANCE hModule, DWORD reason, LPVOID lpReserved)
{
switch (reason) {
case DLL_PROCESS_ATTACH:
// Don't force the system to page DllMain back in every time
// we create/destroy a thread
DisableThreadLibraryCalls(hModule);
// Initialize the heap
malloc_init_hard();
break;
case DLL_PROCESS_DETACH:
break;
-
}
return TRUE;
}
#endif