--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -145,19 +145,16 @@
*/
#ifdef MOZ_WIDGET_GONK
/* Reduce the amount of unused dirty pages to 1MiB on B2G */
# define MOZ_MALLOC_OPTIONS "ff"
#else
# define MOZ_MALLOC_OPTIONS ""
#endif

-/* Memory filling (junk/poison/zero). */
-#define MALLOC_FILL
-
#ifndef MALLOC_PRODUCTION
/*
* MALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
# define MALLOC_DEBUG

/* Support optional abort() on OOM. */
@@ -1078,29 +1075,25 @@ static __thread arena_t *arenas_map;
/*
* Runtime configuration options.
*/
MOZ_JEMALLOC_API
const char *_malloc_options = MOZ_MALLOC_OPTIONS;

#ifndef MALLOC_PRODUCTION
static bool opt_abort = true;
-#ifdef MALLOC_FILL
static bool opt_junk = true;
static bool opt_poison = true;
static bool opt_zero = false;
-#endif
#else
static bool opt_abort = false;
-#ifdef MALLOC_FILL
static const bool opt_junk = false;
static const bool opt_poison = true;
static const bool opt_zero = false;
#endif
-#endif

static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
static bool opt_print_stats = false;
#ifdef MALLOC_STATIC_SIZES
#define opt_quantum_2pow QUANTUM_2POW_MIN
#define opt_small_max_2pow SMALL_MAX_2POW_DEFAULT
#define opt_chunk_2pow CHUNK_2POW_DEFAULT
#else
@@ -3580,22 +3573,20 @@ arena_malloc_small(arena_t *arena, size_
}

bin->stats.nrequests++;
arena->stats.nmalloc_small++;
arena->stats.allocated_small += size;
malloc_spin_unlock(&arena->lock);

if (zero == false) {
-#ifdef MALLOC_FILL
if (opt_junk)
memset(ret, 0xe4, size);
else if (opt_zero)
memset(ret, 0, size);
-#endif
} else
memset(ret, 0, size);

return (ret);
}

static void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
@@ -3610,22 +3601,20 @@ arena_malloc_large(arena_t *arena, size_
malloc_spin_unlock(&arena->lock);
return (NULL);
}
arena->stats.nmalloc_large++;
arena->stats.allocated_large += size;
malloc_spin_unlock(&arena->lock);

if (zero == false) {
-#ifdef MALLOC_FILL
if (opt_junk)
memset(ret, 0xe4, size);
else if (opt_zero)
memset(ret, 0, size);
-#endif
}

return (ret);
}

static inline void *
arena_malloc(arena_t *arena, size_t size, bool zero)
{
@@ -3706,22 +3695,20 @@ arena_palloc(arena_t *arena, size_t alig
size, false);
}
}

arena->stats.nmalloc_large++;
arena->stats.allocated_large += size;
malloc_spin_unlock(&arena->lock);

-#ifdef MALLOC_FILL
if (opt_junk)
memset(ret, 0xe4, size);
else if (opt_zero)
memset(ret, 0, size);
-#endif
return (ret);
}

static inline void *
ipalloc(size_t alignment, size_t size)
{
void *ret;
size_t ceil_size;
@@ -3925,20 +3912,18 @@ arena_dalloc_small(arena_t *arena, arena
arena_bin_t *bin;
size_t size;

run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
RELEASE_ASSERT(run->magic == ARENA_RUN_MAGIC);
bin = run->bin;
size = bin->reg_size;

-#ifdef MALLOC_FILL
if (opt_poison)
memset(ptr, 0xe5, size);
-#endif

arena_run_reg_dalloc(run, bin, ptr, size);
run->nfree++;

if (run->nfree == bin->nregs) {
/* Deallocate run. */
if (run == bin->runcur)
bin->runcur = NULL;
@@ -4003,20 +3988,18 @@ arena_dalloc_small(arena_t *arena, arena

static void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
pagesize_2pow;
size_t size = chunk->map[pageind].bits & ~pagesize_mask;

-#ifdef MALLOC_FILL
if (opt_poison)
memset(ptr, 0xe5, size);
-#endif

arena->stats.allocated_large -= size;
arena->stats.ndalloc_large++;
arena_run_dalloc(arena, (arena_run_t *)ptr, true);
}

static inline void
arena_dalloc(void *ptr, size_t offset)
@@ -4126,51 +4109,45 @@ arena_ralloc_large_grow(arena_t *arena,
static bool
arena_ralloc_large(void *ptr, size_t size, size_t oldsize)
{
size_t psize;

psize = PAGE_CEILING(size);
if (psize == oldsize) {
/* Same size class. */
-#ifdef MALLOC_FILL
if (opt_poison && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0xe5, oldsize -
size);
}
-#endif
return (false);
} else {
arena_chunk_t *chunk;
arena_t *arena;

chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
RELEASE_ASSERT(arena->magic == ARENA_MAGIC);

if (psize < oldsize) {
-#ifdef MALLOC_FILL
/* Fill before shrinking in order avoid a race. */
if (opt_poison) {
memset((void *)((uintptr_t)ptr + size), 0xe5,
oldsize - size);
}
-#endif
arena_ralloc_large_shrink(arena, chunk, ptr, psize,
oldsize);
return (false);
} else {
bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
psize, oldsize);
-#ifdef MALLOC_FILL
if (ret == false && opt_zero) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
size - oldsize);
}
-#endif
return (ret);
}
}
}

static void *
arena_ralloc(void *ptr, size_t size, size_t oldsize)
{
@@ -4213,22 +4190,20 @@ arena_ralloc(void *ptr, size_t size, siz
if (copysize >= VM_COPY_MIN)
pages_copy(ret, ptr, copysize);
else
#endif
memcpy(ret, ptr, copysize);
idalloc(ptr);
return (ret);
IN_PLACE:
-#ifdef MALLOC_FILL
if (opt_poison && size < oldsize)
memset((void *)((uintptr_t)ptr + size), 0xe5, oldsize - size);
else if (opt_zero && size > oldsize)
memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
-#endif
return (ptr);
}

static inline void *
iralloc(void *ptr, size_t size)
{
size_t oldsize;

@@ -4451,53 +4426,49 @@ huge_palloc(size_t size, size_t alignmen
huge_mapped += csize;
malloc_mutex_unlock(&huge_mtx);

#ifdef MALLOC_DECOMMIT
if (csize - psize > 0)
pages_decommit((void *)((uintptr_t)ret + psize), csize - psize);
#endif

-#ifdef MALLOC_FILL
if (zero == false) {
if (opt_junk)
# ifdef MALLOC_DECOMMIT
memset(ret, 0xe4, psize);
# else
memset(ret, 0xe4, csize);
# endif
else if (opt_zero)
# ifdef MALLOC_DECOMMIT
memset(ret, 0, psize);
# else
memset(ret, 0, csize);
# endif
}
-#endif

return (ret);
}

static void *
huge_ralloc(void *ptr, size_t size, size_t oldsize)
{
void *ret;
size_t copysize;

/* Avoid moving the allocation if the size class would not change. */
if (oldsize > arena_maxclass &&
CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
size_t psize = PAGE_CEILING(size);

-#ifdef MALLOC_FILL
if (opt_poison && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0xe5, oldsize
- size);
}
-#endif
#ifdef MALLOC_DECOMMIT
if (psize < oldsize) {
extent_node_t *node, key;

pages_decommit((void *)((uintptr_t)ptr + psize),
oldsize - psize);

/* Update recorded size. */
@@ -4533,22 +4504,20 @@ huge_ralloc(void *ptr, size_t size, size
assert(node->size == oldsize);
huge_allocated += psize - oldsize;
/* No need to change huge_mapped, because we didn't
* (un)map anything. */
node->size = psize;
malloc_mutex_unlock(&huge_mtx);
}

-#ifdef MALLOC_FILL
if (opt_zero && size > oldsize) {
memset((void *)((uintptr_t)ptr + oldsize), 0, size
- oldsize);
}
-#endif
return (ptr);
}

/*
* If we get here, then size and oldsize are different enough that we
* need to use a different size class. In that case, fall back to
* allocating new space and copying.
*/
@@ -4605,30 +4574,26 @@ malloc_print_stats(void)
#ifdef NDEBUG
"disabled",
#else
"enabled",
#endif
"\n", "");
_malloc_message("Boolean MALLOC_OPTIONS: ",
opt_abort ? "A" : "a", "", "");
-#ifdef MALLOC_FILL
_malloc_message(opt_poison ? "C" : "c", "", "", "");
_malloc_message(opt_junk ? "J" : "j", "", "", "");
-#endif
_malloc_message("P", "", "", "");
#ifdef MALLOC_SYSV
_malloc_message(opt_sysv ? "V" : "v", "", "", "");
#endif
#ifdef MALLOC_XMALLOC
_malloc_message(opt_xmalloc ? "X" : "x", "", "", "");
#endif
-#ifdef MALLOC_FILL
_malloc_message(opt_zero ? "Z" : "z", "", "", "");
-#endif
_malloc_message("\n", "", "", "");
_malloc_message("Max arenas: ", umax2s(narenas, 10, s), "\n",
"");
_malloc_message("Pointer size: ", umax2s(sizeof(void *), 10, s),
"\n", "");
_malloc_message("Quantum size: ", umax2s(quantum, 10, s), "\n",
"");
@@ -4878,45 +4843,41 @@ MALLOC_OUT:
for (k = 0; k < nreps; k++) {
switch (opts[j]) {
case 'a':
opt_abort = false;
break;
case 'A':
opt_abort = true;
break;
-#ifdef MALLOC_FILL
#ifndef MALLOC_PRODUCTION
case 'c':
opt_poison = false;
break;
case 'C':
opt_poison = true;
break;
#endif
-#endif
case 'f':
opt_dirty_max >>= 1;
break;
case 'F':
if (opt_dirty_max == 0)
opt_dirty_max = 1;
else if ((opt_dirty_max << 1) != 0)
opt_dirty_max <<= 1;
break;
-#ifdef MALLOC_FILL
#ifndef MALLOC_PRODUCTION
case 'j':
opt_junk = false;
break;
case 'J':
opt_junk = true;
break;
#endif
-#endif
#ifndef MALLOC_STATIC_SIZES
case 'k':
/*
* Chunks always require at least one
* header page, so chunks can never be
* smaller than two pages.
*/
if (opt_chunk_2pow > pagesize_2pow + 1)
@@ -4966,26 +4927,24 @@ MALLOC_OUT:
#ifdef MALLOC_XMALLOC
case 'x':
opt_xmalloc = false;
break;
case 'X':
opt_xmalloc = true;
break;
#endif
-#ifdef MALLOC_FILL
#ifndef MALLOC_PRODUCTION
case 'z':
opt_zero = false;
break;
case 'Z':
opt_zero = true;
break;
#endif
-#endif
default: {
char cbuf[2];

cbuf[0] = opts[j];
cbuf[1] = '\0';
_malloc_message(_getprogname(),
": (malloc) Unsupported character "
"in malloc options: '", cbuf,
@@ -5515,41 +5474,29 @@ jemalloc_stats_impl(jemalloc_stats_t *st
size_t i, non_arena_mapped, chunk_header_size;

assert(stats != NULL);

/*
* Gather runtime settings.
*/
stats->opt_abort = opt_abort;
- stats->opt_junk =
-#ifdef MALLOC_FILL
- opt_junk ? true :
-#endif
- false;
- stats->opt_poison =
-#ifdef MALLOC_FILL
- opt_poison ? true :
-#endif
- false;
+ stats->opt_junk = opt_junk;
+ stats->opt_poison = opt_poison;
stats->opt_sysv =
#ifdef MALLOC_SYSV
opt_sysv ? true :
#endif
false;
stats->opt_xmalloc =
#ifdef MALLOC_XMALLOC
opt_xmalloc ? true :
#endif
false;
- stats->opt_zero =
-#ifdef MALLOC_FILL
- opt_zero ? true :
-#endif
- false;
+ stats->opt_zero = opt_zero;
stats->narenas = narenas;
stats->quantum = quantum;
stats->small_max = small_max;
stats->large_max = arena_maxclass;
stats->chunksize = chunksize;
stats->dirty_max = opt_dirty_max;

/*