Bug 1365460 - Use MOZ_DIAGNOSTIC_ASSERT where mozjemalloc uses RELEASE_ASSERT. r?njn
author Mike Hommey <mh+mozilla@glandium.org>
date Thu, 18 May 2017 10:57:43 +0900
changeset 580761 b0b79cdcdc362151e253b8a72d82352307419fa7
parent 580760 a254c463bbd323387a3450c6f1f86306d9f120fb
child 580762 29ed98979183399fb012fc14a0799c69dab90986
push id 59654
push user bmo:mh+mozilla@glandium.org
push date Thu, 18 May 2017 22:44:22 +0000
reviewers njn
bugs 1365460
milestone 55.0a1
Bug 1365460 - Use MOZ_DIAGNOSTIC_ASSERT where mozjemalloc uses RELEASE_ASSERT. r?njn

In Gecko code, MOZ_RELEASE_ASSERT means assertions that happen on all types of builds. In mozjemalloc, RELEASE_ASSERT means assertions that happen when MOZ_JEMALLOC_HARD_ASSERTS is set, and normal assertions otherwise, which is confusing. On the other hand, RELEASE_ASSERT is very similar to MOZ_DIAGNOSTIC_ASSERT, and we may just want to use that instead.

Moreover, with release promotion, the check that sets MOZ_JEMALLOC_HARD_ASSERTS means releases (promoted from beta) would end up with those asserts even though they're not meant to, so MOZ_DIAGNOSTIC_ASSERT is actually closer to the intent. It does, however, mean we'd lose the beta population running those assertions.
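For context on the substitution below: the RELEASE_ASSERT macro being removed (see the mozjemalloc.cpp hunk) crashes via MOZ_CRASH_UNSAFE_OOL when moz.build defines MOZ_JEMALLOC_HARD_ASSERTS, and degrades to a debug-only MOZ_ASSERT otherwise. MOZ_DIAGNOSTIC_ASSERT expands approximately as sketched here; this is not the authoritative mfbt/Assertions.h definition (it omits the optional reason argument), and which builds define MOZ_DIAGNOSTIC_ASSERT_ENABLED is decided there (debug and pre-release builds, not beta/release, which is why the beta population stops running these checks).

/* Sketch of the mfbt behavior: a fatal MOZ_RELEASE_ASSERT when diagnostic
 * asserts are enabled, a debug-only MOZ_ASSERT (a no-op in opt builds)
 * when they are not. */
#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
#  define MOZ_DIAGNOSTIC_ASSERT(expr) MOZ_RELEASE_ASSERT(expr)
#else
#  define MOZ_DIAGNOSTIC_ASSERT(expr) MOZ_ASSERT(expr)
#endif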
memory/mozjemalloc/moz.build
memory/mozjemalloc/mozjemalloc.cpp
--- a/memory/mozjemalloc/moz.build
+++ b/memory/mozjemalloc/moz.build
@@ -13,21 +13,16 @@ SOURCES += [
 ]
 FINAL_LIBRARY = 'memory'
 
 # See bug 422055.
 if CONFIG['OS_ARCH'] == 'SunOS' and not CONFIG['GNU_CC'] \
         and CONFIG['MOZ_OPTIMIZE']:
     CFLAGS += ['-xO5']
 
-# For non release/esr builds, enable (some) fatal jemalloc assertions.  This
-# helps us catch memory errors.
-if CONFIG['MOZ_UPDATE_CHANNEL'] not in ('release', 'esr'):
-    DEFINES['MOZ_JEMALLOC_HARD_ASSERTS'] = True
-
 DEFINES['MOZ_JEMALLOC_IMPL'] = True
 
 LOCAL_INCLUDES += [
     '/memory/build',
 ]
 
 if CONFIG['GNU_CXX']:
     CXXFLAGS += ['-Wno-unused'] # too many annoying warnings from mfbt/ headers
--- a/memory/mozjemalloc/mozjemalloc.cpp
+++ b/memory/mozjemalloc/mozjemalloc.cpp
@@ -680,17 +680,17 @@ struct arena_chunk_s {
 
 	/* Map of pages within chunk that keeps track of free/large/small. */
 	arena_chunk_map_t map[1]; /* Dynamically sized. */
 };
 typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
 
 typedef struct arena_run_s arena_run_t;
 struct arena_run_s {
-#if defined(MOZ_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+#if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
 	uint32_t	magic;
 #  define ARENA_RUN_MAGIC 0x384adf93
 #endif
 
 	/* Bin this run is associated with. */
 	arena_bin_t	*bin;
 
 	/* Index of first element that might have a free region. */
@@ -734,17 +734,17 @@ struct arena_bin_s {
 	/* Offset of first region in a run for this bin's size class. */
 	uint32_t	reg0_offset;
 
 	/* Bin statistics. */
 	malloc_bin_stats_t stats;
 };
 
 struct arena_s {
-#if defined(MOZ_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+#if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
 	uint32_t		magic;
 #  define ARENA_MAGIC 0x947d3d24
 #endif
 
 	/* All operations on this arena require that lock be locked. */
 	malloc_spinlock_t	lock;
 
 	arena_stats_t		stats;
@@ -1207,26 +1207,16 @@ static void
 // instead of the one defined here; use only MozTagAnonymousMemory().
 
 #ifdef MOZ_MEMORY_ANDROID
 // Android's pthread.h does not declare pthread_atfork() until SDK 21.
 extern "C" MOZ_EXPORT
 int pthread_atfork(void (*)(void), void (*)(void), void(*)(void));
 #endif
 
-#if defined(MOZ_JEMALLOC_HARD_ASSERTS)
-#  define RELEASE_ASSERT(assertion) do {	\
-	if (!(assertion)) {			\
-		MOZ_CRASH_UNSAFE_OOL(#assertion);	\
-	}					\
-} while (0)
-#else
-#  define RELEASE_ASSERT(assertion) MOZ_ASSERT(assertion)
-#endif
-
 /******************************************************************************/
 /*
  * Begin mutex.  We can't use normal pthread mutexes in all places, because
  * they require malloc()ed memory, which causes bootstrapping issues in some
  * cases.
  */
 
 static bool
@@ -1932,17 +1922,17 @@ malloc_rtree_new(unsigned bits)
 	malloc_rtree_t *ret;
 	unsigned bits_per_level, height, i;
 
 	bits_per_level = ffs(pow2_ceil((MALLOC_RTREE_NODESIZE /
 	    sizeof(void *)))) - 1;
 	height = bits / bits_per_level;
 	if (height * bits_per_level != bits)
 		height++;
-	RELEASE_ASSERT(height * bits_per_level >= bits);
+	MOZ_DIAGNOSTIC_ASSERT(height * bits_per_level >= bits);
 
 	ret = (malloc_rtree_t*)base_calloc(1, sizeof(malloc_rtree_t) +
 	    (sizeof(unsigned) * (height - 1)));
 	if (ret == NULL)
 		return (NULL);
 
 	malloc_spin_init(&ret->lock);
 	ret->height = height;
@@ -2544,17 +2534,17 @@ choose_arena(void)
 #  endif
 
 	if (ret == NULL) {
                 ret = thread_local_arena(false);
 	}
 #else
 	ret = arenas[0];
 #endif
-	RELEASE_ASSERT(ret != NULL);
+	MOZ_DIAGNOSTIC_ASSERT(ret != NULL);
 	return (ret);
 }
 
 static inline int
 arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
 {
 	uintptr_t a_chunk = (uintptr_t)a;
 	uintptr_t b_chunk = (uintptr_t)b;
@@ -2669,17 +2659,17 @@ arena_run_reg_alloc(arena_run_t *run, ar
 			 * contains a free region.
 			 */
 			run->regs_minelm = i; /* Low payoff: + (mask == 0); */
 
 			return (ret);
 		}
 	}
 	/* Not reached. */
-	RELEASE_ASSERT(0);
+	MOZ_DIAGNOSTIC_ASSERT(0);
 	return (NULL);
 }
 
 static inline void
 arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
 {
 	/*
 	 * To divide by a number D that is not a power of two we multiply
@@ -2762,24 +2752,24 @@ arena_run_reg_dalloc(arena_run_t *run, a
 		/*
 		 * size_invs isn't large enough to handle this size class, so
 		 * calculate regind using actual division.  This only happens
 		 * if the user increases small_max via the 'S' runtime
 		 * configuration option.
 		 */
 		regind = diff / size;
 	};
-	RELEASE_ASSERT(diff == regind * size);
-	RELEASE_ASSERT(regind < bin->nregs);
+	MOZ_DIAGNOSTIC_ASSERT(diff == regind * size);
+	MOZ_DIAGNOSTIC_ASSERT(regind < bin->nregs);
 
 	elm = regind >> (SIZEOF_INT_2POW + 3);
 	if (elm < run->regs_minelm)
 		run->regs_minelm = elm;
 	bit = regind - (elm << (SIZEOF_INT_2POW + 3));
-	RELEASE_ASSERT((run->regs_mask[elm] & (1U << bit)) == 0);
+	MOZ_DIAGNOSTIC_ASSERT((run->regs_mask[elm] & (1U << bit)) == 0);
 	run->regs_mask[elm] |= (1U << bit);
 #undef SIZE_INV
 #undef SIZE_INV_SHIFT
 }
 
 static void
 arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
     bool zero)
@@ -3038,35 +3028,35 @@ arena_purge(arena_t *arena, bool all)
 #ifdef MOZ_DEBUG
 	size_t ndirty = 0;
 	rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
 	    chunk) {
 		ndirty += chunk->ndirty;
 	} rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
 	MOZ_ASSERT(ndirty == arena->ndirty);
 #endif
-	RELEASE_ASSERT(all || (arena->ndirty > opt_dirty_max));
+	MOZ_DIAGNOSTIC_ASSERT(all || (arena->ndirty > opt_dirty_max));
 
 	arena->stats.npurge++;
 
 	/*
 	 * Iterate downward through chunks until enough dirty memory has been
 	 * purged.  Terminate as soon as possible in order to minimize the
 	 * number of system calls, even if a chunk has only been partially
 	 * purged.
 	 */
 	while (arena->ndirty > (dirty_max >> 1)) {
 #ifdef MALLOC_DOUBLE_PURGE
 		bool madvised = false;
 #endif
 		chunk = arena_chunk_tree_dirty_last(&arena->chunks_dirty);
-		RELEASE_ASSERT(chunk != NULL);
+		MOZ_DIAGNOSTIC_ASSERT(chunk != NULL);
 
 		for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
-			RELEASE_ASSERT(i >= arena_chunk_header_npages);
+			MOZ_DIAGNOSTIC_ASSERT(i >= arena_chunk_header_npages);
 
 			if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
 #ifdef MALLOC_DECOMMIT
 				const size_t free_operation = CHUNK_MAP_DECOMMITTED;
 #else
 				const size_t free_operation = CHUNK_MAP_MADVISED;
 #endif
 				MOZ_ASSERT((chunk->map[i].bits &
@@ -3128,30 +3118,30 @@ static void
 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 {
 	arena_chunk_t *chunk;
 	size_t size, run_ind, run_pages;
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
 	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
 	    >> pagesize_2pow);
-	RELEASE_ASSERT(run_ind >= arena_chunk_header_npages);
-	RELEASE_ASSERT(run_ind < chunk_npages);
+	MOZ_DIAGNOSTIC_ASSERT(run_ind >= arena_chunk_header_npages);
+	MOZ_DIAGNOSTIC_ASSERT(run_ind < chunk_npages);
 	if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0)
 		size = chunk->map[run_ind].bits & ~pagesize_mask;
 	else
 		size = run->bin->run_size;
 	run_pages = (size >> pagesize_2pow);
 
 	/* Mark pages as unallocated in the chunk map. */
 	if (dirty) {
 		size_t i;
 
 		for (i = 0; i < run_pages; i++) {
-			RELEASE_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
+			MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
 			    == 0);
 			chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
 		}
 
 		if (chunk->ndirty == 0) {
 			arena_chunk_tree_dirty_insert(&arena->chunks_dirty,
 			    chunk);
 		}
@@ -3181,17 +3171,17 @@ arena_run_dalloc(arena_t *arena, arena_r
 		 * inserted later.
 		 */
 		arena_avail_tree_remove(&arena->runs_avail,
 		    &chunk->map[run_ind+run_pages]);
 
 		size += nrun_size;
 		run_pages = size >> pagesize_2pow;
 
-		RELEASE_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
+		MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
 		    == nrun_size);
 		chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
 		    pagesize_mask);
 		chunk->map[run_ind+run_pages-1].bits = size |
 		    (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
 	}
 
 	/* Try to coalesce backward. */
@@ -3206,17 +3196,17 @@ arena_run_dalloc(arena_t *arena, arena_r
 		 * inserted later.
 		 */
 		arena_avail_tree_remove(&arena->runs_avail,
 		    &chunk->map[run_ind]);
 
 		size += prun_size;
 		run_pages = size >> pagesize_2pow;
 
-		RELEASE_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
+		MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
 		    prun_size);
 		chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
 		    pagesize_mask);
 		chunk->map[run_ind+run_pages-1].bits = size |
 		    (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
 	}
 
 	/* Insert into runs_avail, now that coalescing is complete. */
@@ -3316,53 +3306,53 @@ arena_bin_nonfull_run_get(arena_t *arena
 		/* The last element has spare bits that need to be unset. */
 		run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
 		    - remainder));
 	}
 
 	run->regs_minelm = 0;
 
 	run->nfree = bin->nregs;
-#if defined(MOZ_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+#if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
 	run->magic = ARENA_RUN_MAGIC;
 #endif
 
 	bin->stats.nruns++;
 	bin->stats.curruns++;
 	if (bin->stats.curruns > bin->stats.highruns)
 		bin->stats.highruns = bin->stats.curruns;
 	return (run);
 }
 
 /* bin->runcur must have space available before this function is called. */
 static inline void *
 arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
 {
 	void *ret;
 
-	RELEASE_ASSERT(run->magic == ARENA_RUN_MAGIC);
-	RELEASE_ASSERT(run->nfree > 0);
+	MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
+	MOZ_DIAGNOSTIC_ASSERT(run->nfree > 0);
 
 	ret = arena_run_reg_alloc(run, bin);
-	RELEASE_ASSERT(ret != NULL);
+	MOZ_DIAGNOSTIC_ASSERT(ret != NULL);
 	run->nfree--;
 
 	return (ret);
 }
 
 /* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
 static void *
 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
 {
 
 	bin->runcur = arena_bin_nonfull_run_get(arena, bin);
 	if (bin->runcur == NULL)
 		return (NULL);
-	RELEASE_ASSERT(bin->runcur->magic == ARENA_RUN_MAGIC);
-	RELEASE_ASSERT(bin->runcur->nfree > 0);
+	MOZ_DIAGNOSTIC_ASSERT(bin->runcur->magic == ARENA_RUN_MAGIC);
+	MOZ_DIAGNOSTIC_ASSERT(bin->runcur->nfree > 0);
 
 	return (arena_bin_malloc_easy(arena, bin, bin->runcur));
 }
 
 /*
  * Calculate bin->run_size such that it meets the following constraints:
  *
  *   *) bin->run_size >= min_run_size
@@ -3469,17 +3459,17 @@ arena_malloc_small(arena_t *arena, size_
 		bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
 		    - 1];
 	} else {
 		/* Sub-page. */
 		size = pow2_ceil(size);
 		bin = &arena->bins[ntbins + nqbins
 		    + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
 	}
-	RELEASE_ASSERT(size == bin->reg_size);
+	MOZ_DIAGNOSTIC_ASSERT(size == bin->reg_size);
 
 	malloc_spin_lock(&arena->lock);
 	if ((run = bin->runcur) != NULL && run->nfree > 0)
 		ret = arena_bin_malloc_easy(arena, bin, run);
 	else
 		ret = arena_bin_malloc_hard(arena, bin);
 
 	if (ret == NULL) {
@@ -3530,17 +3520,17 @@ arena_malloc_large(arena_t *arena, size_
 	return (ret);
 }
 
 static inline void *
 arena_malloc(arena_t *arena, size_t size, bool zero)
 {
 
 	MOZ_ASSERT(arena != NULL);
-	RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
+	MOZ_DIAGNOSTIC_ASSERT(arena->magic == ARENA_MAGIC);
 	MOZ_ASSERT(size != 0);
 	MOZ_ASSERT(QUANTUM_CEILING(size) <= arena_maxclass);
 
 	if (size <= bin_maxclass) {
 		return (arena_malloc_small(arena, size, zero));
 	} else
 		return (arena_malloc_large(arena, size, zero));
 }
@@ -3726,24 +3716,24 @@ arena_salloc(const void *ptr)
 	size_t pageind, mapbits;
 
 	MOZ_ASSERT(ptr != NULL);
 	MOZ_ASSERT(CHUNK_ADDR2BASE(ptr) != ptr);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
 	mapbits = chunk->map[pageind].bits;
-	RELEASE_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+	MOZ_DIAGNOSTIC_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
 		arena_run_t *run = (arena_run_t *)(mapbits & ~pagesize_mask);
-		RELEASE_ASSERT(run->magic == ARENA_RUN_MAGIC);
+		MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
 		ret = run->bin->reg_size;
 	} else {
 		ret = mapbits & ~pagesize_mask;
-		RELEASE_ASSERT(ret != 0);
+		MOZ_DIAGNOSTIC_ASSERT(ret != 0);
 	}
 
 	return (ret);
 }
 
 /*
  * Validate ptr before assuming that it points to an allocation.  Currently,
  * the following validation is performed:
@@ -3760,17 +3750,17 @@ isalloc_validate(const void *ptr)
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	if (chunk == NULL)
 		return (0);
 
 	if (malloc_rtree_get(chunk_rtree, (uintptr_t)chunk) == NULL)
 		return (0);
 
 	if (chunk != ptr) {
-		RELEASE_ASSERT(chunk->arena->magic == ARENA_MAGIC);
+		MOZ_DIAGNOSTIC_ASSERT(chunk->arena->magic == ARENA_MAGIC);
 		return (arena_salloc(ptr));
 	} else {
 		size_t ret;
 		extent_node_t *node;
 		extent_node_t key;
 
 		/* Chunk. */
 		key.addr = (void *)chunk;
@@ -3804,17 +3794,17 @@ isalloc(const void *ptr)
 
 		/* Chunk (huge allocation). */
 
 		malloc_mutex_lock(&huge_mtx);
 
 		/* Extract from tree of huge allocations. */
 		key.addr = const_cast<void*>(ptr);
 		node = extent_tree_ad_search(&huge, &key);
-		RELEASE_ASSERT(node != NULL);
+		MOZ_DIAGNOSTIC_ASSERT(node != NULL);
 
 		ret = node->size;
 
 		malloc_mutex_unlock(&huge_mtx);
 	}
 
 	return (ret);
 }
@@ -3823,17 +3813,17 @@ static inline void
 arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     arena_chunk_map_t *mapelm)
 {
 	arena_run_t *run;
 	arena_bin_t *bin;
 	size_t size;
 
 	run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
-	RELEASE_ASSERT(run->magic == ARENA_RUN_MAGIC);
+	MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
 	bin = run->bin;
 	size = bin->reg_size;
 
 	memset(ptr, kAllocPoison, size);
 
 	arena_run_reg_dalloc(run, bin, ptr, size);
 	run->nfree++;
 
@@ -3846,21 +3836,21 @@ arena_dalloc_small(arena_t *arena, arena
 			    (uintptr_t)chunk)) >> pagesize_2pow;
 			arena_chunk_map_t *run_mapelm =
 			    &chunk->map[run_pageind];
 			/*
 			 * This block's conditional is necessary because if the
 			 * run only contains one region, then it never gets
 			 * inserted into the non-full runs tree.
 			 */
-			RELEASE_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
+			MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
 				run_mapelm);
 			arena_run_tree_remove(&bin->runs, run_mapelm);
 		}
-#if defined(MOZ_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+#if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
 		run->magic = 0;
 #endif
 		arena_run_dalloc(arena, run, true);
 		bin->stats.curruns--;
 	} else if (run->nfree == 1 && run != bin->runcur) {
 		/*
 		 * Make sure that bin->runcur always refers to the lowest
 		 * non-full run, if one exists.
@@ -3874,29 +3864,29 @@ arena_dalloc_small(arena_t *arena, arena
 				    (arena_chunk_t*)CHUNK_ADDR2BASE(bin->runcur);
 				size_t runcur_pageind =
 				    (((uintptr_t)bin->runcur -
 				    (uintptr_t)runcur_chunk)) >> pagesize_2pow;
 				arena_chunk_map_t *runcur_mapelm =
 				    &runcur_chunk->map[runcur_pageind];
 
 				/* Insert runcur. */
-				RELEASE_ASSERT(arena_run_tree_search(&bin->runs,
+				MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs,
 				    runcur_mapelm) == NULL);
 				arena_run_tree_insert(&bin->runs,
 				    runcur_mapelm);
 			}
 			bin->runcur = run;
 		} else {
 			size_t run_pageind = (((uintptr_t)run -
 			    (uintptr_t)chunk)) >> pagesize_2pow;
 			arena_chunk_map_t *run_mapelm =
 			    &chunk->map[run_pageind];
 
-			RELEASE_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
+			MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
 			    NULL);
 			arena_run_tree_insert(&bin->runs, run_mapelm);
 		}
 	}
 	arena->stats.allocated_small -= size;
 	arena->stats.ndalloc_small++;
 }
 
@@ -3924,22 +3914,22 @@ arena_dalloc(void *ptr, size_t offset)
 
 	MOZ_ASSERT(ptr != NULL);
 	MOZ_ASSERT(offset != 0);
 	MOZ_ASSERT(CHUNK_ADDR2OFFSET(ptr) == offset);
 
 	chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
 	arena = chunk->arena;
 	MOZ_ASSERT(arena != NULL);
-	RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
+	MOZ_DIAGNOSTIC_ASSERT(arena->magic == ARENA_MAGIC);
 
 	malloc_spin_lock(&arena->lock);
 	pageind = offset >> pagesize_2pow;
 	mapelm = &chunk->map[pageind];
-	RELEASE_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
+	MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
 		/* Small allocation. */
 		arena_dalloc_small(arena, chunk, ptr, mapelm);
 	} else {
 		/* Large allocation. */
 		arena_dalloc_large(arena, chunk, ptr);
 	}
 	malloc_spin_unlock(&arena->lock);
@@ -3980,17 +3970,17 @@ arena_ralloc_large_shrink(arena_t *arena
 static bool
 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t size, size_t oldsize)
 {
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow;
 	size_t npages = oldsize >> pagesize_2pow;
 
 	malloc_spin_lock(&arena->lock);
-	RELEASE_ASSERT(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
+	MOZ_DIAGNOSTIC_ASSERT(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
 
 	/* Try to extend the run. */
 	MOZ_ASSERT(size > oldsize);
 	if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits
 	    & CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits &
 	    ~pagesize_mask) >= size - oldsize) {
 		/*
 		 * The next run is available and sufficiently large.  Split the
@@ -4033,17 +4023,17 @@ arena_ralloc_large(void *ptr, size_t siz
 		}
 		return (false);
 	} else {
 		arena_chunk_t *chunk;
 		arena_t *arena;
 
 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 		arena = chunk->arena;
-		RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
+		MOZ_DIAGNOSTIC_ASSERT(arena->magic == ARENA_MAGIC);
 
 		if (psize < oldsize) {
 			/* Fill before shrinking in order avoid a race. */
 			memset((void *)((uintptr_t)ptr + size), kAllocPoison,
 			    oldsize - size);
 			arena_ralloc_large_shrink(arena, chunk, ptr, psize,
 			    oldsize);
 			return (false);
@@ -4190,17 +4180,17 @@ arena_new(arena_t *arena)
 
 		bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
 
 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
 	}
 
-#if defined(MOZ_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+#if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
 	arena->magic = ARENA_MAGIC;
 #endif
 
 	return (false);
 }
 
 static inline arena_t *
 arenas_fallback()
@@ -5384,17 +5374,17 @@ hard_purge_chunk(arena_chunk_t *chunk)
 	for (i = arena_chunk_header_npages; i < chunk_npages; i++) {
 		/* Find all adjacent pages with CHUNK_MAP_MADVISED set. */
 		size_t npages;
 		for (npages = 0;
 		     chunk->map[i + npages].bits & CHUNK_MAP_MADVISED && i + npages < chunk_npages;
 		     npages++) {
 			/* Turn off the chunk's MADV_FREED bit and turn on its
 			 * DECOMMITTED bit. */
-			RELEASE_ASSERT(!(chunk->map[i + npages].bits & CHUNK_MAP_DECOMMITTED));
+			MOZ_DIAGNOSTIC_ASSERT(!(chunk->map[i + npages].bits & CHUNK_MAP_DECOMMITTED));
 			chunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
 		}
 
 		/* We could use mincore to find out which pages are actually
 		 * present, but it's not clear that's better. */
 		if (npages > 0) {
 			pages_decommit(((char*)chunk) + (i << pagesize_2pow), npages << pagesize_2pow);
 			pages_commit(((char*)chunk) + (i << pagesize_2pow), npages << pagesize_2pow);