--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -145,22 +145,16 @@
*/
#ifdef MOZ_WIDGET_GONK
/* Reduce the amount of unused dirty pages to 1MiB on B2G */
# define MOZ_MALLOC_OPTIONS "ff"
#else
# define MOZ_MALLOC_OPTIONS ""
#endif
-/*
- * MALLOC_STATS enables statistics calculation, and is required for
- * jemalloc_stats().
- */
-#define MALLOC_STATS
-
/* Memory filling (junk/poison/zero). */
#define MALLOC_FILL
#ifndef MALLOC_PRODUCTION
/*
* MALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
@@ -511,18 +505,16 @@ static malloc_mutex_t init_lock = PTHREA
static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
/******************************************************************************/
/*
* Statistics data structures.
*/
-#ifdef MALLOC_STATS
-
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
struct malloc_bin_stats_s {
/*
* Number of allocation requests that corresponded to the size of this
* bin.
*/
uint64_t nrequests;
@@ -573,18 +565,16 @@ struct arena_stats_s {
uint64_t nmalloc_small;
uint64_t ndalloc_small;
size_t allocated_large;
uint64_t nmalloc_large;
uint64_t ndalloc_large;
};
-#endif /* #ifdef MALLOC_STATS */
-
/******************************************************************************/
/*
* Extent data structures.
*/
/* Tree of extents. */
typedef struct extent_node_s extent_node_t;
struct extent_node_s {
@@ -703,21 +693,19 @@ struct arena_chunk_map_s {
* are madvised (with either MADV_DONTNEED or MADV_FREE) are marked with
* CHUNK_MAP_MADVISED.
*
* Otherwise, if MALLOC_DECOMMIT is not defined and MALLOC_DOUBLE_PURGE is
* defined, then a page which is madvised is marked as CHUNK_MAP_MADVISED.
* When it's finally freed with jemalloc_purge_freed_pages, the page is marked
* as CHUNK_MAP_DECOMMITTED.
*/
-#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS) || defined(MALLOC_DOUBLE_PURGE)
#define CHUNK_MAP_MADVISED ((size_t)0x40U)
#define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
#define CHUNK_MAP_MADVISED_OR_DECOMMITTED (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
-#endif
#define CHUNK_MAP_KEY ((size_t)0x10U)
#define CHUNK_MAP_DIRTY ((size_t)0x08U)
#define CHUNK_MAP_ZEROED ((size_t)0x04U)
#define CHUNK_MAP_LARGE ((size_t)0x02U)
#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
};
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
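
With the #if guard gone, the MADVISED/DECOMMITTED flags are defined unconditionally, matching their now-unconditional use in arena_run_split() later in this patch. A minimal sketch of that test, assuming i indexes a page of chunk:

        if (chunk->map[i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) {
                /* Page was decommitted or handed back via madvise; it must be
                 * (re)committed, or at least cleared of these bits, before reuse. */
        }
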
@@ -795,34 +783,30 @@ struct arena_bin_s {
uint32_t nregs;
/* Number of elements in a run's regs_mask for this bin's size class. */
uint32_t regs_mask_nelms;
/* Offset of first region in a run for this bin's size class. */
uint32_t reg0_offset;
-#ifdef MALLOC_STATS
/* Bin statistics. */
malloc_bin_stats_t stats;
-#endif
};
struct arena_s {
#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
uint32_t magic;
# define ARENA_MAGIC 0x947d3d24
#endif
/* All operations on this arena require that lock be locked. */
malloc_spinlock_t lock;
-#ifdef MALLOC_STATS
arena_stats_t stats;
-#endif
/* Tree of dirty-page-containing chunks this arena manages. */
arena_chunk_tree_t chunks_dirty;
#ifdef MALLOC_DOUBLE_PURGE
/* Head of a linked list of MADV_FREE'd-page-containing chunks this
* arena manages. */
LinkedList chunks_madvised;
@@ -1037,46 +1021,40 @@ static extent_tree_t chunks_szad_mmap;
static extent_tree_t chunks_ad_mmap;
/* Protects huge allocation-related data structures. */
static malloc_mutex_t huge_mtx;
/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;
-#ifdef MALLOC_STATS
/* Huge allocation statistics. */
static uint64_t huge_nmalloc;
static uint64_t huge_ndalloc;
static size_t huge_allocated;
static size_t huge_mapped;
-#endif
/****************************/
/*
* base (internal allocation).
*/
/*
* Current pages that are being used for internal memory allocations. These
* pages are carved up in cacheline-size quanta, so that there is no chance of
* false cache line sharing.
*/
static void *base_pages;
static void *base_next_addr;
-#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
static void *base_next_decommitted;
-#endif
static void *base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t *base_nodes;
static malloc_mutex_t base_mtx;
-#ifdef MALLOC_STATS
static size_t base_mapped;
static size_t base_committed;
-#endif
/********/
/*
* Arenas.
*/
/*
* Arenas that are used to service external requests. Not all elements of the
@@ -1142,31 +1120,27 @@ static bool opt_xmalloc = false;
* Begin function prototypes for non-inline static functions.
*/
static char *umax2s(uintmax_t x, unsigned base, char *s);
static bool malloc_mutex_init(malloc_mutex_t *mutex);
static bool malloc_spin_init(malloc_spinlock_t *lock);
static void wrtmessage(const char *p1, const char *p2, const char *p3,
const char *p4);
-#ifdef MALLOC_STATS
#ifdef MOZ_MEMORY_DARWIN
/* Avoid namespace collision with OS X's malloc APIs. */
#define malloc_printf moz_malloc_printf
#endif
static void malloc_printf(const char *format, ...);
-#endif
static bool base_pages_alloc(size_t minsize);
static void *base_alloc(size_t size);
static void *base_calloc(size_t number, size_t size);
static extent_node_t *base_node_alloc(void);
static void base_node_dealloc(extent_node_t *node);
-#ifdef MALLOC_STATS
static void stats_print(arena_t *arena);
-#endif
static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap(size_t size, size_t alignment);
static void *chunk_recycle(extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, size_t size,
size_t alignment, bool base, bool *zero);
static void *chunk_alloc(size_t size, size_t alignment, bool base, bool zero);
static void chunk_record(extent_tree_t *chunks_szad,
@@ -1516,32 +1490,30 @@ pow2_ceil(size_t x)
static inline const char *
_getprogname(void)
{
return ("<jemalloc>");
}
-#ifdef MALLOC_STATS
/*
* Print to stderr in such a way as to (hopefully) avoid memory allocation.
*/
static void
malloc_printf(const char *format, ...)
{
char buf[4096];
va_list ap;
va_start(ap, format);
vsnprintf(buf, sizeof(buf), format, ap);
va_end(ap);
_malloc_message(buf, "", "", "");
}
-#endif
/******************************************************************************/
static inline void
pages_decommit(void *addr, size_t size)
{
#ifdef MOZ_MEMORY_WINDOWS
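
malloc_printf(), made unconditional in the hunk above, takes ordinary printf-style arguments and forwards the formatted buffer to _malloc_message(). A typical call, casting size_t values for portability with older vsnprintf implementations:

        malloc_printf("allocated: %lu, mapped: %lu\n",
            (unsigned long)allocated, (unsigned long)mapped);
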
@@ -1595,43 +1567,37 @@ pages_commit(void *addr, size_t size)
MozTagAnonymousMemory(addr, size, "jemalloc");
# endif
}
static bool
base_pages_alloc(size_t minsize)
{
size_t csize;
-#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
size_t pminsize;
-#endif
assert(minsize != 0);
csize = CHUNK_CEILING(minsize);
base_pages = chunk_alloc(csize, chunksize, true, false);
if (base_pages == NULL)
return (true);
base_next_addr = base_pages;
base_past_addr = (void *)((uintptr_t)base_pages + csize);
-#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
/*
* Leave enough pages for minsize committed, since otherwise they would
* have to be immediately recommitted.
*/
pminsize = PAGE_CEILING(minsize);
base_next_decommitted = (void *)((uintptr_t)base_pages + pminsize);
# if defined(MALLOC_DECOMMIT)
if (pminsize < csize)
pages_decommit(base_next_decommitted, csize - pminsize);
# endif
-# ifdef MALLOC_STATS
base_mapped += csize;
base_committed += pminsize;
-# endif
-#endif
return (false);
}
static void *
base_alloc(size_t size)
{
void *ret;
@@ -1646,33 +1612,29 @@ base_alloc(size_t size)
if (base_pages_alloc(csize)) {
malloc_mutex_unlock(&base_mtx);
return (NULL);
}
}
/* Allocate. */
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
-#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
/* Make sure enough pages are committed for the new allocation. */
if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
void *pbase_next_addr =
(void *)(PAGE_CEILING((uintptr_t)base_next_addr));
# ifdef MALLOC_DECOMMIT
pages_commit(base_next_decommitted, (uintptr_t)pbase_next_addr -
(uintptr_t)base_next_decommitted);
# endif
base_next_decommitted = pbase_next_addr;
-# ifdef MALLOC_STATS
base_committed += (uintptr_t)pbase_next_addr -
(uintptr_t)base_next_decommitted;
-# endif
}
-#endif
malloc_mutex_unlock(&base_mtx);
return (ret);
}
static void *
base_calloc(size_t number, size_t size)
{
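
To illustrate the commit-on-demand logic in base_alloc() above, assuming 4 KiB pages: if base_next_addr sits 100 bytes into base_pages (with base_next_decommitted at offset 4096) and an allocation advances it to offset 5000, then pbase_next_addr becomes offset 8192, pages_commit() (under MALLOC_DECOMMIT) commits the page at offsets 4096-8191, and base_next_decommitted moves to offset 8192.
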
@@ -1709,17 +1671,16 @@ base_node_dealloc(extent_node_t *node)
malloc_mutex_lock(&base_mtx);
*(extent_node_t **)node = base_nodes;
base_nodes = node;
malloc_mutex_unlock(&base_mtx);
}
/******************************************************************************/
-#ifdef MALLOC_STATS
static void
stats_print(arena_t *arena)
{
unsigned i, gap_start;
#ifdef MOZ_MEMORY_WINDOWS
malloc_printf("dirty: %Iu page%s dirty, %I64u sweep%s,"
" %I64u madvise%s, %I64u page%s purged\n",
@@ -1820,17 +1781,16 @@ stats_print(arena_t *arena)
/* Gap of more than one size class. */
malloc_printf("[%u..%u]\n", gap_start, i - 1);
} else {
/* Gap of one size class. */
malloc_printf("[%u]\n", gap_start);
}
}
}
-#endif
/*
* End Utility functions/macros.
*/
/******************************************************************************/
/*
* Begin extent tree code.
*/
@@ -2940,17 +2900,16 @@ arena_run_split(arena_t *arena, arena_ru
chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
pagesize_mask);
arena_avail_tree_insert(&arena->runs_avail,
&chunk->map[run_ind+need_pages]);
}
for (i = 0; i < need_pages; i++) {
-#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS) || defined(MALLOC_DOUBLE_PURGE)
/*
* Commit decommitted pages if necessary. If a decommitted
* page is encountered, commit all needed adjacent decommitted
* pages in one operation, in order to reduce system call
* overhead.
*/
if (chunk->map[run_ind + i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) {
size_t j;
@@ -2968,33 +2927,27 @@ arena_run_split(arena_t *arena, arena_ru
chunk->map[run_ind + i + j].bits &=
~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
}
# ifdef MALLOC_DECOMMIT
pages_commit((void *)((uintptr_t)chunk + ((run_ind + i)
<< pagesize_2pow)), (j << pagesize_2pow));
-# ifdef MALLOC_STATS
arena->stats.ncommit++;
-# endif
# endif
-# ifdef MALLOC_STATS
arena->stats.committed += j;
-# endif
# ifndef MALLOC_DECOMMIT
}
# else
} else /* No need to zero since commit zeros. */
# endif
-#endif
-
/* Zero if necessary. */
if (zero) {
if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED)
== 0) {
memset((void *)((uintptr_t)chunk + ((run_ind
+ i) << pagesize_2pow)), 0, pagesize);
/* CHUNK_MAP_ZEROED is cleared below. */
}
@@ -3031,19 +2984,17 @@ arena_run_split(arena_t *arena, arena_ru
}
static void
arena_chunk_init(arena_t *arena, arena_chunk_t *chunk)
{
arena_run_t *run;
size_t i;
-#ifdef MALLOC_STATS
arena->stats.mapped += chunksize;
-#endif
chunk->arena = arena;
/*
* Claim that no pages are in use, since the header is merely overhead.
*/
chunk->ndirty = 0;
@@ -3059,24 +3010,20 @@ arena_chunk_init(arena_t *arena, arena_c
chunk->map[chunk_npages-1].bits = arena_maxclass | CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED;
#ifdef MALLOC_DECOMMIT
/*
* Start out decommitted, in order to force a closer correspondence
* between dirty pages and committed untouched pages.
*/
pages_decommit(run, arena_maxclass);
-# ifdef MALLOC_STATS
arena->stats.ndecommit++;
arena->stats.decommitted += (chunk_npages - arena_chunk_header_npages);
-# endif
-#endif
-#ifdef MALLOC_STATS
+#endif
arena->stats.committed += arena_chunk_header_npages;
-#endif
/* Insert the run into the runs_avail tree. */
arena_avail_tree_insert(&arena->runs_avail,
&chunk->map[arena_chunk_header_npages]);
#ifdef MALLOC_DOUBLE_PURGE
LinkedList_Init(&chunk->chunks_madvised_elem);
#endif
@@ -3086,31 +3033,27 @@ static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
if (arena->spare != NULL) {
if (arena->spare->ndirty > 0) {
arena_chunk_tree_dirty_remove(
&chunk->arena->chunks_dirty, arena->spare);
arena->ndirty -= arena->spare->ndirty;
-#ifdef MALLOC_STATS
arena->stats.committed -= arena->spare->ndirty;
-#endif
}
#ifdef MALLOC_DOUBLE_PURGE
/* This is safe to do even if arena->spare is not in the list. */
LinkedList_Remove(&arena->spare->chunks_madvised_elem);
#endif
chunk_dealloc((void *)arena->spare, chunksize);
-#ifdef MALLOC_STATS
arena->stats.mapped -= chunksize;
arena->stats.committed -= arena_chunk_header_npages;
-#endif
}
/*
* Remove run from runs_avail, so that the arena does not use it.
* Dirty page flushing only uses the chunks_dirty tree, so leaving this
* chunk in the chunks_* trees is sufficient for that purpose.
*/
arena_avail_tree_remove(&arena->runs_avail,
@@ -3189,19 +3132,17 @@ arena_purge(arena_t *arena, bool all)
rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
chunk) {
ndirty += chunk->ndirty;
} rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
assert(ndirty == arena->ndirty);
#endif
RELEASE_ASSERT(all || (arena->ndirty > opt_dirty_max));
-#ifdef MALLOC_STATS
arena->stats.npurge++;
-#endif
/*
* Iterate downward through chunks until enough dirty memory has been
* purged. Terminate as soon as possible in order to minimize the
* number of system calls, even if a chunk has only been partially
* purged.
*/
while (arena->ndirty > (dirty_max >> 1)) {
@@ -3235,37 +3176,31 @@ arena_purge(arena_t *arena, bool all)
}
chunk->ndirty -= npages;
arena->ndirty -= npages;
#ifdef MALLOC_DECOMMIT
pages_decommit((void *)((uintptr_t)
chunk + (i << pagesize_2pow)),
(npages << pagesize_2pow));
-# ifdef MALLOC_STATS
arena->stats.ndecommit++;
arena->stats.decommitted += npages;
-# endif
-#endif
-#ifdef MALLOC_STATS
+#endif
arena->stats.committed -= npages;
-#endif
#ifndef MALLOC_DECOMMIT
madvise((void *)((uintptr_t)chunk + (i <<
pagesize_2pow)), (npages << pagesize_2pow),
MADV_FREE);
# ifdef MALLOC_DOUBLE_PURGE
madvised = true;
# endif
#endif
-#ifdef MALLOC_STATS
arena->stats.nmadvise++;
arena->stats.purged += npages;
-#endif
if (arena->ndirty <= (dirty_max >> 1))
break;
}
}
if (chunk->ndirty == 0) {
arena_chunk_tree_dirty_remove(&arena->chunks_dirty,
chunk);
@@ -3440,19 +3375,17 @@ arena_bin_nonfull_run_get(arena_t *arena
unsigned i, remainder;
/* Look for a usable run. */
mapelm = arena_run_tree_first(&bin->runs);
if (mapelm != NULL) {
/* run is guaranteed to have available space. */
arena_run_tree_remove(&bin->runs, mapelm);
run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
-#ifdef MALLOC_STATS
bin->stats.reruns++;
-#endif
return (run);
}
/* No existing runs have any space available. */
/* Allocate a new run. */
run = arena_run_alloc(arena, bin, bin->run_size, false, false);
if (run == NULL)
return (NULL);
@@ -3479,22 +3412,20 @@ arena_bin_nonfull_run_get(arena_t *arena
run->regs_minelm = 0;
run->nfree = bin->nregs;
#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
run->magic = ARENA_RUN_MAGIC;
#endif
-#ifdef MALLOC_STATS
bin->stats.nruns++;
bin->stats.curruns++;
if (bin->stats.curruns > bin->stats.highruns)
bin->stats.highruns = bin->stats.curruns;
-#endif
return (run);
}
/* bin->runcur must have space available before this function is called. */
static inline void *
arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
{
void *ret;
@@ -3612,25 +3543,23 @@ arena_malloc_small(arena_t *arena, size_
arena_bin_t *bin;
arena_run_t *run;
if (size < small_min) {
/* Tiny. */
size = pow2_ceil(size);
bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
1)))];
-#if (!defined(NDEBUG) || defined(MALLOC_STATS))
/*
* Bin calculation is always correct, but we may need
* to fix size for the purposes of assertions and/or
* stats accuracy.
*/
if (size < (1U << TINY_MIN_2POW))
size = (1U << TINY_MIN_2POW);
-#endif
} else if (size <= small_max) {
/* Quantum-spaced. */
size = QUANTUM_CEILING(size);
bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
- 1];
} else {
/* Sub-page. */
size = pow2_ceil(size);
@@ -3645,21 +3574,19 @@ arena_malloc_small(arena_t *arena, size_
else
ret = arena_bin_malloc_hard(arena, bin);
if (ret == NULL) {
malloc_spin_unlock(&arena->lock);
return (NULL);
}
-#ifdef MALLOC_STATS
bin->stats.nrequests++;
arena->stats.nmalloc_small++;
arena->stats.allocated_small += size;
-#endif
malloc_spin_unlock(&arena->lock);
if (zero == false) {
#ifdef MALLOC_FILL
if (opt_junk)
memset(ret, 0xe4, size);
else if (opt_zero)
memset(ret, 0, size);
@@ -3678,20 +3605,18 @@ arena_malloc_large(arena_t *arena, size_
/* Large allocation. */
size = PAGE_CEILING(size);
malloc_spin_lock(&arena->lock);
ret = (void *)arena_run_alloc(arena, NULL, size, true, zero);
if (ret == NULL) {
malloc_spin_unlock(&arena->lock);
return (NULL);
}
-#ifdef MALLOC_STATS
arena->stats.nmalloc_large++;
arena->stats.allocated_large += size;
-#endif
malloc_spin_unlock(&arena->lock);
if (zero == false) {
#ifdef MALLOC_FILL
if (opt_junk)
memset(ret, 0xe4, size);
else if (opt_zero)
memset(ret, 0, size);
@@ -3777,20 +3702,18 @@ arena_palloc(arena_t *arena, size_t alig
if (trailsize != 0) {
/* Trim trailing space. */
assert(trailsize < alloc_size);
arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, size + trailsize,
size, false);
}
}
-#ifdef MALLOC_STATS
arena->stats.nmalloc_large++;
arena->stats.allocated_large += size;
-#endif
malloc_spin_unlock(&arena->lock);
#ifdef MALLOC_FILL
if (opt_junk)
memset(ret, 0xe4, size);
else if (opt_zero)
memset(ret, 0, size);
#endif
@@ -4032,19 +3955,17 @@ arena_dalloc_small(arena_t *arena, arena
RELEASE_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
run_mapelm);
arena_run_tree_remove(&bin->runs, run_mapelm);
}
#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
run->magic = 0;
#endif
arena_run_dalloc(arena, run, true);
-#ifdef MALLOC_STATS
bin->stats.curruns--;
-#endif
} else if (run->nfree == 1 && run != bin->runcur) {
/*
* Make sure that bin->runcur always refers to the lowest
* non-full run, if one exists.
*/
if (bin->runcur == NULL)
bin->runcur = run;
else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
@@ -4071,48 +3992,33 @@ arena_dalloc_small(arena_t *arena, arena
arena_chunk_map_t *run_mapelm =
&chunk->map[run_pageind];
RELEASE_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
NULL);
arena_run_tree_insert(&bin->runs, run_mapelm);
}
}
-#ifdef MALLOC_STATS
arena->stats.allocated_small -= size;
arena->stats.ndalloc_small++;
-#endif
}
static void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
-#ifdef MALLOC_FILL
-#ifndef MALLOC_STATS
- if (opt_poison)
-#endif
-#endif
- {
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
- pagesize_2pow;
- size_t size = chunk->map[pageind].bits & ~pagesize_mask;
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
+ pagesize_2pow;
+ size_t size = chunk->map[pageind].bits & ~pagesize_mask;
#ifdef MALLOC_FILL
-#ifdef MALLOC_STATS
- if (opt_poison)
-#endif
- memset(ptr, 0xe5, size);
-#endif
-#ifdef MALLOC_STATS
- arena->stats.allocated_large -= size;
-#endif
- }
-#ifdef MALLOC_STATS
+ if (opt_poison)
+ memset(ptr, 0xe5, size);
+#endif
+ arena->stats.allocated_large -= size;
arena->stats.ndalloc_large++;
-#endif
arena_run_dalloc(arena, (arena_run_t *)ptr, true);
}
static inline void
arena_dalloc(void *ptr, size_t offset)
{
arena_chunk_t *chunk;
@@ -4166,19 +4072,17 @@ arena_ralloc_large_shrink(arena_t *arena
/*
* Shrink the run, and make trailing pages available for other
* allocations.
*/
malloc_spin_lock(&arena->lock);
arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
true);
-#ifdef MALLOC_STATS
arena->stats.allocated_large -= oldsize - size;
-#endif
malloc_spin_unlock(&arena->lock);
}
static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t size, size_t oldsize)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow;
@@ -4201,19 +4105,17 @@ arena_ralloc_large_grow(arena_t *arena,
((pageind+npages) << pagesize_2pow)), size - oldsize, true,
false);
chunk->map[pageind].bits = size | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
chunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
-#ifdef MALLOC_STATS
arena->stats.allocated_large += size - oldsize;
-#endif
malloc_spin_unlock(&arena->lock);
return (false);
}
malloc_spin_unlock(&arena->lock);
return (true);
}
@@ -4346,19 +4248,17 @@ arena_new(arena_t *arena)
{
unsigned i;
arena_bin_t *bin;
size_t pow2_size, prev_run_size;
if (malloc_spin_init(&arena->lock))
return (true);
-#ifdef MALLOC_STATS
memset(&arena->stats, 0, sizeof(arena_stats_t));
-#endif
/* Initialize chunks. */
arena_chunk_tree_dirty_new(&arena->chunks_dirty);
#ifdef MALLOC_DOUBLE_PURGE
LinkedList_Init(&arena->chunks_madvised);
#endif
arena->spare = NULL;
@@ -4374,50 +4274,44 @@ arena_new(arena_t *arena)
bin = &arena->bins[i];
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
bin->reg_size = (1ULL << (TINY_MIN_2POW + i));
prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-#ifdef MALLOC_STATS
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
}
/* Quantum-spaced bins. */
for (; i < ntbins + nqbins; i++) {
bin = &arena->bins[i];
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
bin->reg_size = quantum * (i - ntbins + 1);
pow2_size = pow2_ceil(quantum * (i - ntbins + 1));
prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-#ifdef MALLOC_STATS
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
}
/* (2^n)-spaced sub-page bins. */
for (; i < ntbins + nqbins + nsbins; i++) {
bin = &arena->bins[i];
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-#ifdef MALLOC_STATS
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
}
#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
arena->magic = ARENA_MAGIC;
#endif
return (false);
}
@@ -4528,17 +4422,16 @@ huge_palloc(size_t size, size_t alignmen
/* Insert node into huge. */
node->addr = ret;
psize = PAGE_CEILING(size);
node->size = psize;
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
-#ifdef MALLOC_STATS
huge_nmalloc++;
/* Although we allocated space for csize bytes, we indicate that we've
* allocated only psize bytes.
*
* If DECOMMIT is defined, this is a reasonable thing to do, since
* we'll explicitly decommit the bytes in excess of psize.
*
@@ -4551,17 +4444,16 @@ huge_palloc(size_t size, size_t alignmen
* requested if it first calls malloc_usable_size and finds out how
* much space it has to play with. But because we set node->size =
* psize above, malloc_usable_size will return psize, not csize, and
* the program will (hopefully) never touch bytes in excess of psize.
* Thus those bytes won't take up space in physical memory, and we can
* reasonably claim we never "allocated" them in the first place. */
huge_allocated += psize;
huge_mapped += csize;
-#endif
malloc_mutex_unlock(&huge_mtx);
#ifdef MALLOC_DECOMMIT
if (csize - psize > 0)
pages_decommit((void *)((uintptr_t)ret + psize), csize - psize);
#endif
#ifdef MALLOC_FILL
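
As a concrete illustration of the accounting described above, assuming the default 1 MiB chunks and 4 KiB pages: a request of 1 MiB + 1 byte gives csize = 2 MiB and psize = 1 MiB + 4 KiB, so huge_mapped grows by 2 MiB while huge_allocated (and malloc_usable_size()) report only 1 MiB + 4 KiB; the remaining ~1 MiB is decommitted under MALLOC_DECOMMIT, or simply never touched otherwise.
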
@@ -4609,21 +4501,19 @@ huge_ralloc(void *ptr, size_t size, size
oldsize - psize);
/* Update recorded size. */
malloc_mutex_lock(&huge_mtx);
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->size == oldsize);
-# ifdef MALLOC_STATS
huge_allocated -= oldsize - psize;
/* No need to change huge_mapped, because we didn't
* (un)map anything. */
-# endif
node->size = psize;
malloc_mutex_unlock(&huge_mtx);
} else if (psize > oldsize) {
pages_commit((void *)((uintptr_t)ptr + oldsize),
psize - oldsize);
}
#endif
@@ -4636,21 +4526,19 @@ huge_ralloc(void *ptr, size_t size, size
if (psize > oldsize) {
/* Update recorded size. */
extent_node_t *node, key;
malloc_mutex_lock(&huge_mtx);
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->size == oldsize);
-# ifdef MALLOC_STATS
huge_allocated += psize - oldsize;
/* No need to change huge_mapped, because we didn't
* (un)map anything. */
-# endif
node->size = psize;
malloc_mutex_unlock(&huge_mtx);
}
#ifdef MALLOC_FILL
if (opt_zero && size > oldsize) {
memset((void *)((uintptr_t)ptr + oldsize), 0, size
- oldsize);
@@ -4688,21 +4576,19 @@ huge_dalloc(void *ptr)
/* Extract from tree of huge allocations. */
key.addr = ptr;
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
extent_tree_ad_remove(&huge, node);
-#ifdef MALLOC_STATS
huge_ndalloc++;
huge_allocated -= node->size;
huge_mapped -= CHUNK_CEILING(node->size);
-#endif
malloc_mutex_unlock(&huge_mtx);
/* Unmap chunk. */
chunk_dealloc(node->addr, CHUNK_CEILING(node->size));
base_node_dealloc(node);
}
@@ -4751,17 +4637,16 @@ malloc_print_stats(void)
_malloc_message("Max dirty pages per arena: ",
umax2s(opt_dirty_max, 10, s), "\n", "");
_malloc_message("Chunk size: ", umax2s(chunksize, 10, s), "",
"");
_malloc_message(" (2^", umax2s(opt_chunk_2pow, 10, s), ")\n",
"");
-#ifdef MALLOC_STATS
{
size_t allocated, mapped = 0;
unsigned i;
arena_t *arena;
/* Calculate and print allocated/mapped stats. */
/* arenas. */
@@ -4816,17 +4701,16 @@ malloc_print_stats(void)
"\narenas[%u]:\n", i);
malloc_spin_lock(&arena->lock);
stats_print(arena);
malloc_spin_unlock(&arena->lock);
}
}
malloc_spin_unlock(&arenas_lock);
}
-#endif /* #ifdef MALLOC_STATS */
_malloc_message("--- End malloc statistics ---\n", "", "", "");
}
}
/*
* FreeBSD's pthreads implementation calls malloc(3), so the malloc
* implementation has to take pains to avoid infinite recursion during
* initialization.
@@ -5174,28 +5058,24 @@ MALLOC_OUT:
/* Initialize chunks data. */
malloc_mutex_init(&chunks_mtx);
extent_tree_szad_new(&chunks_szad_mmap);
extent_tree_ad_new(&chunks_ad_mmap);
/* Initialize huge allocation data. */
malloc_mutex_init(&huge_mtx);
extent_tree_ad_new(&huge);
-#ifdef MALLOC_STATS
huge_nmalloc = 0;
huge_ndalloc = 0;
huge_allocated = 0;
huge_mapped = 0;
-#endif
/* Initialize base allocation data structures. */
-#ifdef MALLOC_STATS
base_mapped = 0;
base_committed = 0;
-#endif
base_nodes = NULL;
malloc_mutex_init(&base_mtx);
malloc_spin_init(&arenas_lock);
/*
* Initialize one arena here.
*/