Bug 1365460 - Replace assert with MOZ_ASSERT. r?njn (draft)
author      Mike Hommey <mh+mozilla@glandium.org>
date        Thu, 18 May 2017 10:52:00 +0900
changeset   580759 46384aaa0e54ca91c08756375c0ca3a93a19e279
parent      580758 d8b3bec50929ade8d16fc143e80e0fd36ed570d7
child       580760 a254c463bbd323387a3450c6f1f86306d9f120fb
push id     59654
push user   bmo:mh+mozilla@glandium.org
push date   Thu, 18 May 2017 22:44:22 +0000
reviewers   njn
bugs        1365460
milestone   55.0a1
Bug 1365460 - Replace assert with MOZ_ASSERT. r?njn
memory/mozjemalloc/mozjemalloc.cpp
memory/mozjemalloc/rb.h
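
For context, the shim removed by this patch defined a debug-only assert(e) in terms of
MOZ_ASSERT; dropping it lets call sites use MOZ_ASSERT from "mozilla/Assertions.h"
directly. Below is a minimal sketch of the debug-only behavior those call sites rely on.
It is not the real Assertions.h definition (the SKETCH_ASSERT name is hypothetical, and
the real MOZ_ASSERT also reports the failing expression, file, and line before crashing),
but it mirrors how the removed shim behaved: active in debug builds, a no-op in release.

    #include "mozilla/Assertions.h"  // for MOZ_CRASH_UNSAFE_OOL, as used by RELEASE_ASSERT below

    // Hypothetical sketch only: crash on a false condition in debug builds,
    // compile to nothing in release builds.
    #ifdef DEBUG
    #  define SKETCH_ASSERT(expr)                \
         do {                                    \
           if (!(expr)) {                        \
             MOZ_CRASH_UNSAFE_OOL(#expr);        \
           }                                     \
         } while (0)
    #else
    #  define SKETCH_ASSERT(expr) do { } while (0)
    #endif

Note that the patch leaves RELEASE_ASSERT's hard-assert path intact: with
MOZ_JEMALLOC_HARD_ASSERTS defined it still crashes in release builds, and otherwise it
now falls back to MOZ_ASSERT instead of the removed local assert shim.
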
--- a/memory/mozjemalloc/mozjemalloc.cpp
+++ b/memory/mozjemalloc/mozjemalloc.cpp
@@ -1201,36 +1201,30 @@ static void
 }
 
 #include "mozilla/Assertions.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/TaggedAnonymousMemory.h"
 // Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
 // instead of the one defined here; use only MozTagAnonymousMemory().
 
-#ifdef MOZ_DEBUG
-#  define assert(e) MOZ_ASSERT(e)
-#else
-#  define assert(e)
-#endif
-
 #ifdef MOZ_MEMORY_ANDROID
 // Android's pthread.h does not declare pthread_atfork() until SDK 21.
 extern "C" MOZ_EXPORT
 int pthread_atfork(void (*)(void), void (*)(void), void(*)(void));
 #endif
 
 #if defined(MOZ_JEMALLOC_HARD_ASSERTS)
 #  define RELEASE_ASSERT(assertion) do {	\
 	if (!(assertion)) {			\
 		MOZ_CRASH_UNSAFE_OOL(#assertion);	\
 	}					\
 } while (0)
 #else
-#  define RELEASE_ASSERT(assertion) assert(assertion)
+#  define RELEASE_ASSERT(assertion) MOZ_ASSERT(assertion)
 #endif
 
 /******************************************************************************/
 /*
  * Begin mutex.  We can't use normal pthread mutexes in all places, because
  * they require malloc()ed memory, which causes bootstrapping issues in some
  * cases.
  */
@@ -1486,17 +1480,17 @@ pages_commit(void *addr, size_t size)
 }
 
 static bool
 base_pages_alloc(size_t minsize)
 {
 	size_t csize;
 	size_t pminsize;
 
-	assert(minsize != 0);
+	MOZ_ASSERT(minsize != 0);
 	csize = CHUNK_CEILING(minsize);
 	base_pages = chunk_alloc(csize, chunksize, true, false);
 	if (base_pages == NULL)
 		return (true);
 	base_next_addr = base_pages;
 	base_past_addr = (void *)((uintptr_t)base_pages + csize);
 	/*
 	 * Leave enough pages for minsize committed, since otherwise they would
@@ -1784,17 +1778,17 @@ pages_map_align(size_t size, size_t alig
 	void *ret;
 
 	/*
 	 * We don't use MAP_FIXED here, because it can cause the *replacement*
 	 * of existing mappings, and we only want to create new mappings.
 	 */
 	ret = mmap((void *)alignment, size, PROT_READ | PROT_WRITE,
 		MAP_PRIVATE | MAP_NOSYNC | MAP_ALIGN | MAP_ANON, -1, 0);
-	assert(ret != NULL);
+	MOZ_ASSERT(ret != NULL);
 
 	if (ret == MAP_FAILED)
 		ret = NULL;
 	else
 		MozTagAnonymousMemory(ret, size, "jemalloc");
 	return (ret);
 }
 #endif
@@ -1847,17 +1841,17 @@ pages_map(void *addr, size_t size)
 #else
 
 	/*
 	 * We don't use MAP_FIXED here, because it can cause the *replacement*
 	 * of existing mappings, and we only want to create new mappings.
 	 */
 	ret = mmap(addr, size, PROT_READ | PROT_WRITE,
 		MAP_PRIVATE | MAP_ANON, -1, 0);
-	assert(ret != NULL);
+	MOZ_ASSERT(ret != NULL);
 #endif
 	if (ret == MAP_FAILED) {
 		ret = NULL;
 	}
 #if defined(__ia64__) || (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
         /*
          * If the allocated memory doesn't have its upper 17 bits clear, consider it
          * as out of memory.
@@ -1886,20 +1880,20 @@ pages_map(void *addr, size_t size)
 		}
 		ret = NULL;
 	}
 	if (ret != NULL) {
 		MozTagAnonymousMemory(ret, size, "jemalloc");
 	}
 
 #if defined(__ia64__) || (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
-	assert(ret == NULL || (!check_placement && ret != NULL)
+	MOZ_ASSERT(ret == NULL || (!check_placement && ret != NULL)
 	    || (check_placement && ret == addr));
 #else
-	assert(ret == NULL || (addr == NULL && ret != addr)
+	MOZ_ASSERT(ret == NULL || (addr == NULL && ret != addr)
 	    || (addr != NULL && ret == addr));
 #endif
 	return (ret);
 }
 
 static void
 pages_unmap(void *addr, size_t size)
 {
@@ -1918,19 +1912,19 @@ pages_unmap(void *addr, size_t size)
 #endif
 
 #ifdef MOZ_MEMORY_DARWIN
 #define	VM_COPY_MIN (pagesize << 5)
 static inline void
 pages_copy(void *dest, const void *src, size_t n)
 {
 
-	assert((void *)((uintptr_t)dest & ~pagesize_mask) == dest);
-	assert(n >= VM_COPY_MIN);
-	assert((void *)((uintptr_t)src & ~pagesize_mask) == src);
+	MOZ_ASSERT((void *)((uintptr_t)dest & ~pagesize_mask) == dest);
+	MOZ_ASSERT(n >= VM_COPY_MIN);
+	MOZ_ASSERT((void *)((uintptr_t)src & ~pagesize_mask) == src);
 
 	vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
 	    (vm_address_t)dest);
 }
 #endif
 
 static inline malloc_rtree_t *
 malloc_rtree_new(unsigned bits)
@@ -2025,17 +2019,17 @@ MALLOC_RTREE_GET_GENERATE(malloc_rtree_g
     * munmap()ped, followed by a different allocator in another thread re-using
     * overlapping virtual memory, all without invalidating the cached rtree
     * value.  The result would be a false positive (the rtree would claim that
     * jemalloc owns memory that it had actually discarded).  I don't think this
     * scenario is possible, but the following assertion is a prudent sanity
     * check.
     */
 #  define MALLOC_RTREE_GET_VALIDATE					\
-	assert(malloc_rtree_get_locked(rtree, key) == ret);
+	MOZ_ASSERT(malloc_rtree_get_locked(rtree, key) == ret);
 #else
 #  define MALLOC_RTREE_GET_VALIDATE
 #endif
 MALLOC_RTREE_GET_GENERATE(malloc_rtree_get)
 #undef MALLOC_RTREE_LOCK
 #undef MALLOC_RTREE_UNLOCK
 #undef MALLOC_RTREE_GET_VALIDATE
 
@@ -2084,17 +2078,17 @@ malloc_rtree_set(malloc_rtree_t *rtree, 
 #define        ALIGNMENT_CEILING(s, alignment)                                        \
         (((s) + (alignment - 1)) & (-(alignment)))
 
 static void *
 pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
 {
         void *ret = (void *)((uintptr_t)addr + leadsize);
 
-        assert(alloc_size >= leadsize + size);
+        MOZ_ASSERT(alloc_size >= leadsize + size);
 #ifdef MOZ_MEMORY_WINDOWS
         {
                 void *new_addr;
 
                 pages_unmap(addr, alloc_size);
                 new_addr = pages_map(ret, size);
                 if (new_addr == ret)
                         return (ret);
@@ -2129,17 +2123,17 @@ chunk_alloc_mmap_slow(size_t size, size_
                 pages = pages_map(NULL, alloc_size);
                 if (pages == NULL)
                         return (NULL);
                 leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
                         (uintptr_t)pages;
                 ret = pages_trim(pages, alloc_size, leadsize, size);
         } while (ret == NULL);
 
-        assert(ret != NULL);
+        MOZ_ASSERT(ret != NULL);
         return (ret);
 }
 
 static void *
 chunk_alloc_mmap(size_t size, size_t alignment)
 {
 #ifdef JEMALLOC_USES_MAP_ALIGN
         return pages_map_align(size, alignment);
@@ -2164,17 +2158,17 @@ chunk_alloc_mmap(size_t size, size_t ali
         if (ret == NULL)
                 return (NULL);
         offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
         if (offset != 0) {
                 pages_unmap(ret, size);
                 return (chunk_alloc_mmap_slow(size, alignment));
         }
 
-        assert(ret != NULL);
+        MOZ_ASSERT(ret != NULL);
         return (ret);
 #endif
 }
 
 bool
 pages_purge(void *addr, size_t length)
 {
 	bool unzeroed;
@@ -2245,17 +2239,17 @@ chunk_recycle(extent_tree_t *chunks_szad
 	malloc_mutex_lock(&chunks_mtx);
 	node = extent_tree_szad_nsearch(chunks_szad, &key);
 	if (node == NULL) {
 		malloc_mutex_unlock(&chunks_mtx);
 		return (NULL);
 	}
 	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
 	    (uintptr_t)node->addr;
-	assert(node->size >= leadsize + size);
+	MOZ_ASSERT(node->size >= leadsize + size);
 	trailsize = node->size - leadsize - size;
 	ret = (void *)((uintptr_t)node->addr + leadsize);
 	zeroed = node->zeroed;
 	if (zeroed)
 	    *zero = true;
 	/* Remove node from the tree. */
 	extent_tree_szad_remove(chunks_szad, node);
 	extent_tree_ad_remove(chunks_ad, node);
@@ -2305,17 +2299,17 @@ chunk_recycle(extent_tree_t *chunks_szad
 		if (zeroed == false)
 			memset(ret, 0, size);
 #ifdef DEBUG
 		else {
 			size_t i;
 			size_t *p = (size_t *)(uintptr_t)ret;
 
 			for (i = 0; i < size / sizeof(size_t); i++)
-				assert(p[i] == 0);
+				MOZ_ASSERT(p[i] == 0);
 		}
 #endif
 	}
 	return (ret);
 }
 
 #ifdef MOZ_MEMORY_WINDOWS
 /*
@@ -2329,20 +2323,20 @@ chunk_recycle(extent_tree_t *chunks_szad
 #define CAN_RECYCLE(size) true
 #endif
 
 static void *
 chunk_alloc(size_t size, size_t alignment, bool base, bool zero)
 {
 	void *ret;
 
-	assert(size != 0);
-	assert((size & chunksize_mask) == 0);
-	assert(alignment != 0);
-	assert((alignment & chunksize_mask) == 0);
+	MOZ_ASSERT(size != 0);
+	MOZ_ASSERT((size & chunksize_mask) == 0);
+	MOZ_ASSERT(alignment != 0);
+	MOZ_ASSERT((alignment & chunksize_mask) == 0);
 
 	if (CAN_RECYCLE(size)) {
 		ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap,
 			size, alignment, base, &zero);
 		if (ret != NULL)
 			goto RETURN;
 	}
 	ret = chunk_alloc_mmap(size, alignment);
@@ -2356,17 +2350,17 @@ RETURN:
 
 	if (ret != NULL && base == false) {
 		if (malloc_rtree_set(chunk_rtree, (uintptr_t)ret, ret)) {
 			chunk_dealloc(ret, size);
 			return (NULL);
 		}
 	}
 
-	assert(CHUNK_ADDR2BASE(ret) == ret);
+	MOZ_ASSERT(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
 }
 
 static void
 chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
     size_t size)
 {
 	bool unzeroed;
@@ -2465,20 +2459,20 @@ chunk_dalloc_mmap(void *chunk, size_t si
 }
 
 #undef CAN_RECYCLE
 
 static void
 chunk_dealloc(void *chunk, size_t size)
 {
 
-	assert(chunk != NULL);
-	assert(CHUNK_ADDR2BASE(chunk) == chunk);
-	assert(size != 0);
-	assert((size & chunksize_mask) == 0);
+	MOZ_ASSERT(chunk != NULL);
+	MOZ_ASSERT(CHUNK_ADDR2BASE(chunk) == chunk);
+	MOZ_ASSERT(size != 0);
+	MOZ_ASSERT((size & chunksize_mask) == 0);
 
 	malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, NULL);
 
 	if (chunk_dalloc_mmap(chunk, size))
 		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
 }
 
 /*
@@ -2560,34 +2554,34 @@ choose_arena(void)
 }
 
 static inline int
 arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
 {
 	uintptr_t a_chunk = (uintptr_t)a;
 	uintptr_t b_chunk = (uintptr_t)b;
 
-	assert(a != NULL);
-	assert(b != NULL);
+	MOZ_ASSERT(a != NULL);
+	MOZ_ASSERT(b != NULL);
 
 	return ((a_chunk > b_chunk) - (a_chunk < b_chunk));
 }
 
 /* Wrap red-black tree macros in functions. */
 rb_wrap(static, arena_chunk_tree_dirty_, arena_chunk_tree_t,
     arena_chunk_t, link_dirty, arena_chunk_comp)
 
 static inline int
 arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
 {
 	uintptr_t a_mapelm = (uintptr_t)a;
 	uintptr_t b_mapelm = (uintptr_t)b;
 
-	assert(a != NULL);
-	assert(b != NULL);
+	MOZ_ASSERT(a != NULL);
+	MOZ_ASSERT(b != NULL);
 
 	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
 }
 
 /* Wrap red-black tree macros in functions. */
 rb_wrap(static, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, link,
     arena_run_comp)
 
@@ -2624,32 +2618,32 @@ rb_wrap(static, arena_avail_tree_, arena
     arena_avail_comp)
 
 static inline void *
 arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
 {
 	void *ret;
 	unsigned i, mask, bit, regind;
 
-	assert(run->magic == ARENA_RUN_MAGIC);
-	assert(run->regs_minelm < bin->regs_mask_nelms);
+	MOZ_ASSERT(run->magic == ARENA_RUN_MAGIC);
+	MOZ_ASSERT(run->regs_minelm < bin->regs_mask_nelms);
 
 	/*
 	 * Move the first check outside the loop, so that run->regs_minelm can
 	 * be updated unconditionally, without the possibility of updating it
 	 * multiple times.
 	 */
 	i = run->regs_minelm;
 	mask = run->regs_mask[i];
 	if (mask != 0) {
 		/* Usable allocation found. */
 		bit = ffs((int)mask) - 1;
 
 		regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
-		assert(regind < bin->nregs);
+		MOZ_ASSERT(regind < bin->nregs);
 		ret = (void *)(((uintptr_t)run) + bin->reg0_offset
 		    + (bin->reg_size * regind));
 
 		/* Clear bit. */
 		mask ^= (1U << bit);
 		run->regs_mask[i] = mask;
 
 		return (ret);
@@ -2657,17 +2651,17 @@ arena_run_reg_alloc(arena_run_t *run, ar
 
 	for (i++; i < bin->regs_mask_nelms; i++) {
 		mask = run->regs_mask[i];
 		if (mask != 0) {
 			/* Usable allocation found. */
 			bit = ffs((int)mask) - 1;
 
 			regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
-			assert(regind < bin->nregs);
+			MOZ_ASSERT(regind < bin->nregs);
 			ret = (void *)(((uintptr_t)run) + bin->reg0_offset
 			    + (bin->reg_size * regind));
 
 			/* Clear bit. */
 			mask ^= (1U << bit);
 			run->regs_mask[i] = mask;
 
 			/*
@@ -2717,18 +2711,18 @@ arena_run_reg_dalloc(arena_run_t *run, a
 	    SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
 	    SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
 	    SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
 	    SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
 #endif
 	};
 	unsigned diff, regind, elm, bit;
 
-	assert(run->magic == ARENA_RUN_MAGIC);
-	assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3
+	MOZ_ASSERT(run->magic == ARENA_RUN_MAGIC);
+	MOZ_ASSERT(((sizeof(size_invs)) / sizeof(unsigned)) + 3
 	    >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
 
 	/*
 	 * Avoid doing division with a variable divisor if possible.  Using
 	 * actual division here can reduce allocator throughput by over 20%!
 	 */
 	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
 	if ((size & (size - 1)) == 0) {
@@ -2795,18 +2789,18 @@ arena_run_split(arena_t *arena, arena_ru
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
 	old_ndirty = chunk->ndirty;
 	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
 	    >> pagesize_2pow);
 	total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >>
 	    pagesize_2pow;
 	need_pages = (size >> pagesize_2pow);
-	assert(need_pages > 0);
-	assert(need_pages <= total_pages);
+	MOZ_ASSERT(need_pages > 0);
+	MOZ_ASSERT(need_pages <= total_pages);
 	rem_pages = total_pages - need_pages;
 
 	arena_avail_tree_remove(&arena->runs_avail, &chunk->map[run_ind]);
 
 	/* Keep track of trailing unused pages for later use. */
 	if (rem_pages > 0) {
 		chunk->map[run_ind+need_pages].bits = (rem_pages <<
 		    pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
@@ -2831,18 +2825,18 @@ arena_run_split(arena_t *arena, arena_ru
 			/*
 			 * Advance i+j to just past the index of the last page
 			 * to commit.  Clear CHUNK_MAP_DECOMMITTED and
 			 * CHUNK_MAP_MADVISED along the way.
 			 */
 			for (j = 0; i + j < need_pages && (chunk->map[run_ind +
 			    i + j].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED); j++) {
 				/* DECOMMITTED and MADVISED are mutually exclusive. */
-				assert(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
-					 chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
+				MOZ_ASSERT(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
+					     chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
 
 				chunk->map[run_ind + i + j].bits &=
 				    ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
 			}
 
 #  ifdef MALLOC_DECOMMIT
 			pages_commit((void *)((uintptr_t)chunk + ((run_ind + i)
 			    << pagesize_2pow)), (j << pagesize_2pow));
@@ -2978,18 +2972,18 @@ arena_chunk_dealloc(arena_t *arena, aren
 
 static arena_run_t *
 arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
     bool zero)
 {
 	arena_run_t *run;
 	arena_chunk_map_t *mapelm, key;
 
-	assert(size <= arena_maxclass);
-	assert((size & pagesize_mask) == 0);
+	MOZ_ASSERT(size <= arena_maxclass);
+	MOZ_ASSERT((size & pagesize_mask) == 0);
 
 	/* Search the arena's chunks for the lowest best fit. */
 	key.bits = size | CHUNK_MAP_KEY;
 	mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
 	if (mapelm != NULL) {
 		arena_chunk_t *chunk =
 		    (arena_chunk_t*)CHUNK_ADDR2BASE(mapelm);
 		size_t pageind = ((uintptr_t)mapelm -
@@ -3042,17 +3036,17 @@ arena_purge(arena_t *arena, bool all)
 	/* If all is set purge all dirty pages. */
 	size_t dirty_max = all ? 1 : opt_dirty_max;
 #ifdef MOZ_DEBUG
 	size_t ndirty = 0;
 	rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
 	    chunk) {
 		ndirty += chunk->ndirty;
 	} rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
-	assert(ndirty == arena->ndirty);
+	MOZ_ASSERT(ndirty == arena->ndirty);
 #endif
 	RELEASE_ASSERT(all || (arena->ndirty > opt_dirty_max));
 
 	arena->stats.npurge++;
 
 	/*
 	 * Iterate downward through chunks until enough dirty memory has been
 	 * purged.  Terminate as soon as possible in order to minimize the
@@ -3070,27 +3064,27 @@ arena_purge(arena_t *arena, bool all)
 			RELEASE_ASSERT(i >= arena_chunk_header_npages);
 
 			if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
 #ifdef MALLOC_DECOMMIT
 				const size_t free_operation = CHUNK_MAP_DECOMMITTED;
 #else
 				const size_t free_operation = CHUNK_MAP_MADVISED;
 #endif
-				assert((chunk->map[i].bits &
-				        CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
+				MOZ_ASSERT((chunk->map[i].bits &
+				            CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
 				chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
 				/* Find adjacent dirty run(s). */
 				for (npages = 1;
 				     i > arena_chunk_header_npages &&
 				       (chunk->map[i - 1].bits & CHUNK_MAP_DIRTY);
 				     npages++) {
 					i--;
-					assert((chunk->map[i].bits &
-					        CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
+					MOZ_ASSERT((chunk->map[i].bits &
+					            CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
 					chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
 				}
 				chunk->ndirty -= npages;
 				arena->ndirty -= npages;
 
 #ifdef MALLOC_DECOMMIT
 				pages_decommit((void *)((uintptr_t)
 				    chunk + (i << pagesize_2pow)),
@@ -3240,17 +3234,17 @@ arena_run_dalloc(arena_t *arena, arena_r
 
 static void
 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     size_t oldsize, size_t newsize)
 {
 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
 	size_t head_npages = (oldsize - newsize) >> pagesize_2pow;
 
-	assert(oldsize > newsize);
+	MOZ_ASSERT(oldsize > newsize);
 
 	/*
 	 * Update the chunk map so that arena_run_dalloc() can treat the
 	 * leading run as separately allocated.
 	 */
 	chunk->map[pageind].bits = (oldsize - newsize) | CHUNK_MAP_LARGE |
 	    CHUNK_MAP_ALLOCATED;
 	chunk->map[pageind+head_npages].bits = newsize | CHUNK_MAP_LARGE |
@@ -3261,17 +3255,17 @@ arena_run_trim_head(arena_t *arena, aren
 
 static void
 arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     size_t oldsize, size_t newsize, bool dirty)
 {
 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
 	size_t npages = newsize >> pagesize_2pow;
 
-	assert(oldsize > newsize);
+	MOZ_ASSERT(oldsize > newsize);
 
 	/*
 	 * Update the chunk map so that arena_run_dalloc() can treat the
 	 * trailing run as separately allocated.
 	 */
 	chunk->map[pageind].bits = newsize | CHUNK_MAP_LARGE |
 	    CHUNK_MAP_ALLOCATED;
 	chunk->map[pageind+npages].bits = (oldsize - newsize) | CHUNK_MAP_LARGE
@@ -3381,18 +3375,18 @@ arena_bin_malloc_hard(arena_t *arena, ar
  */
 static size_t
 arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
 {
 	size_t try_run_size, good_run_size;
 	unsigned good_nregs, good_mask_nelms, good_reg0_offset;
 	unsigned try_nregs, try_mask_nelms, try_reg0_offset;
 
-	assert(min_run_size >= pagesize);
-	assert(min_run_size <= arena_maxclass);
+	MOZ_ASSERT(min_run_size >= pagesize);
+	MOZ_ASSERT(min_run_size <= arena_maxclass);
 
 	/*
 	 * Calculate known-valid settings before entering the run_size
 	 * expansion loop, so that the first part of the loop always copies
 	 * valid settings.
 	 *
 	 * The do..while loop iteratively reduces the number of regions until
 	 * the run header and the regions no longer overlap.  A closed formula
@@ -3432,19 +3426,19 @@ arena_bin_run_size_calc(arena_bin_t *bin
 			try_reg0_offset = try_run_size - (try_nregs *
 			    bin->reg_size);
 		} while (sizeof(arena_run_t) + (sizeof(unsigned) *
 		    (try_mask_nelms - 1)) > try_reg0_offset);
 	} while (try_run_size <= arena_maxclass
 	    && RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX
 	    && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
 
-	assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
+	MOZ_ASSERT(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
 	    <= good_reg0_offset);
-	assert((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
+	MOZ_ASSERT((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
 
 	/* Copy final settings. */
 	bin->run_size = good_run_size;
 	bin->nregs = good_nregs;
 	bin->regs_mask_nelms = good_mask_nelms;
 	bin->reg0_offset = good_reg0_offset;
 
 	return (good_run_size);
@@ -3535,32 +3529,32 @@ arena_malloc_large(arena_t *arena, size_
 
 	return (ret);
 }
 
 static inline void *
 arena_malloc(arena_t *arena, size_t size, bool zero)
 {
 
-	assert(arena != NULL);
+	MOZ_ASSERT(arena != NULL);
 	RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
-	assert(size != 0);
-	assert(QUANTUM_CEILING(size) <= arena_maxclass);
+	MOZ_ASSERT(size != 0);
+	MOZ_ASSERT(QUANTUM_CEILING(size) <= arena_maxclass);
 
 	if (size <= bin_maxclass) {
 		return (arena_malloc_small(arena, size, zero));
 	} else
 		return (arena_malloc_large(arena, size, zero));
 }
 
 static inline void *
 imalloc(size_t size)
 {
 
-	assert(size != 0);
+	MOZ_ASSERT(size != 0);
 
 	if (size <= arena_maxclass)
 		return (arena_malloc(choose_arena(), size, false));
 	else
 		return (huge_malloc(size, false));
 }
 
 static inline void *
@@ -3576,47 +3570,47 @@ icalloc(size_t size)
 /* Only handles large allocations that require more than page alignment. */
 static void *
 arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
 {
 	void *ret;
 	size_t offset;
 	arena_chunk_t *chunk;
 
-	assert((size & pagesize_mask) == 0);
-	assert((alignment & pagesize_mask) == 0);
+	MOZ_ASSERT((size & pagesize_mask) == 0);
+	MOZ_ASSERT((alignment & pagesize_mask) == 0);
 
 	malloc_spin_lock(&arena->lock);
 	ret = (void *)arena_run_alloc(arena, NULL, alloc_size, true, false);
 	if (ret == NULL) {
 		malloc_spin_unlock(&arena->lock);
 		return (NULL);
 	}
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
 
 	offset = (uintptr_t)ret & (alignment - 1);
-	assert((offset & pagesize_mask) == 0);
-	assert(offset < alloc_size);
+	MOZ_ASSERT((offset & pagesize_mask) == 0);
+	MOZ_ASSERT(offset < alloc_size);
 	if (offset == 0)
 		arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, alloc_size, size, false);
 	else {
 		size_t leadsize, trailsize;
 
 		leadsize = alignment - offset;
 		if (leadsize > 0) {
 			arena_run_trim_head(arena, chunk, (arena_run_t*)ret, alloc_size,
 			    alloc_size - leadsize);
 			ret = (void *)((uintptr_t)ret + leadsize);
 		}
 
 		trailsize = alloc_size - leadsize - size;
 		if (trailsize != 0) {
 			/* Trim trailing space. */
-			assert(trailsize < alloc_size);
+			MOZ_ASSERT(trailsize < alloc_size);
 			arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, size + trailsize,
 			    size, false);
 		}
 	}
 
 	arena->stats.nmalloc_large++;
 	arena->stats.allocated_large += size;
 	malloc_spin_unlock(&arena->lock);
@@ -3714,30 +3708,30 @@ ipalloc(size_t alignment, size_t size)
 			ret = arena_palloc(choose_arena(), alignment, ceil_size,
 			    run_size);
 		} else if (alignment <= chunksize)
 			ret = huge_malloc(ceil_size, false);
 		else
 			ret = huge_palloc(ceil_size, alignment, false);
 	}
 
-	assert(((uintptr_t)ret & (alignment - 1)) == 0);
+	MOZ_ASSERT(((uintptr_t)ret & (alignment - 1)) == 0);
 	return (ret);
 }
 
 /* Return the size of the allocation pointed to by ptr. */
 static size_t
 arena_salloc(const void *ptr)
 {
 	size_t ret;
 	arena_chunk_t *chunk;
 	size_t pageind, mapbits;
 
-	assert(ptr != NULL);
-	assert(CHUNK_ADDR2BASE(ptr) != ptr);
+	MOZ_ASSERT(ptr != NULL);
+	MOZ_ASSERT(CHUNK_ADDR2BASE(ptr) != ptr);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
 	mapbits = chunk->map[pageind].bits;
 	RELEASE_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
 		arena_run_t *run = (arena_run_t *)(mapbits & ~pagesize_mask);
 		RELEASE_ASSERT(run->magic == ARENA_RUN_MAGIC);
@@ -3792,22 +3786,22 @@ isalloc_validate(const void *ptr)
 }
 
 static inline size_t
 isalloc(const void *ptr)
 {
 	size_t ret;
 	arena_chunk_t *chunk;
 
-	assert(ptr != NULL);
+	MOZ_ASSERT(ptr != NULL);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	if (chunk != ptr) {
 		/* Region. */
-		assert(chunk->arena->magic == ARENA_MAGIC);
+		MOZ_ASSERT(chunk->arena->magic == ARENA_MAGIC);
 
 		ret = arena_salloc(ptr);
 	} else {
 		extent_node_t *node, key;
 
 		/* Chunk (huge allocation). */
 
 		malloc_mutex_lock(&huge_mtx);
@@ -3923,23 +3917,23 @@ arena_dalloc_large(arena_t *arena, arena
 static inline void
 arena_dalloc(void *ptr, size_t offset)
 {
 	arena_chunk_t *chunk;
 	arena_t *arena;
 	size_t pageind;
 	arena_chunk_map_t *mapelm;
 
-	assert(ptr != NULL);
-	assert(offset != 0);
-	assert(CHUNK_ADDR2OFFSET(ptr) == offset);
+	MOZ_ASSERT(ptr != NULL);
+	MOZ_ASSERT(offset != 0);
+	MOZ_ASSERT(CHUNK_ADDR2OFFSET(ptr) == offset);
 
 	chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
 	arena = chunk->arena;
-	assert(arena != NULL);
+	MOZ_ASSERT(arena != NULL);
 	RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
 
 	malloc_spin_lock(&arena->lock);
 	pageind = offset >> pagesize_2pow;
 	mapelm = &chunk->map[pageind];
 	RELEASE_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
 		/* Small allocation. */
@@ -3951,31 +3945,31 @@ arena_dalloc(void *ptr, size_t offset)
 	malloc_spin_unlock(&arena->lock);
 }
 
 static inline void
 idalloc(void *ptr)
 {
 	size_t offset;
 
-	assert(ptr != NULL);
+	MOZ_ASSERT(ptr != NULL);
 
 	offset = CHUNK_ADDR2OFFSET(ptr);
 	if (offset != 0)
 		arena_dalloc(ptr, offset);
 	else
 		huge_dalloc(ptr);
 }
 
 static void
 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t size, size_t oldsize)
 {
 
-	assert(size < oldsize);
+	MOZ_ASSERT(size < oldsize);
 
 	/*
 	 * Shrink the run, and make trailing pages available for other
 	 * allocations.
 	 */
 	malloc_spin_lock(&arena->lock);
 	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
 	    true);
@@ -3989,17 +3983,17 @@ arena_ralloc_large_grow(arena_t *arena, 
 {
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow;
 	size_t npages = oldsize >> pagesize_2pow;
 
 	malloc_spin_lock(&arena->lock);
 	RELEASE_ASSERT(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
 
 	/* Try to extend the run. */
-	assert(size > oldsize);
+	MOZ_ASSERT(size > oldsize);
 	if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits
 	    & CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits &
 	    ~pagesize_mask) >= size - oldsize) {
 		/*
 		 * The next run is available and sufficiently large.  Split the
 		 * following run, then merge the first part with the existing
 		 * allocation.
 		 */
@@ -4082,17 +4076,17 @@ arena_ralloc(void *ptr, size_t size, siz
 		    (QUANTUM_CEILING(size) >> opt_quantum_2pow)
 		    == (QUANTUM_CEILING(oldsize) >> opt_quantum_2pow))
 			goto IN_PLACE; /* Same size class. */
 	} else if (size <= bin_maxclass) {
 		if (oldsize > small_max && oldsize <= bin_maxclass &&
 		    pow2_ceil(size) == pow2_ceil(oldsize))
 			goto IN_PLACE; /* Same size class. */
 	} else if (oldsize > bin_maxclass && oldsize <= arena_maxclass) {
-		assert(size > bin_maxclass);
+		MOZ_ASSERT(size > bin_maxclass);
 		if (arena_ralloc_large(ptr, size, oldsize) == false)
 			return (ptr);
 	}
 
 	/*
 	 * If we get here, then size and oldsize are different enough that we
 	 * need to move the object.  In that case, fall back to allocating new
 	 * space and copying.
@@ -4119,18 +4113,18 @@ IN_PLACE:
 	return (ptr);
 }
 
 static inline void *
 iralloc(void *ptr, size_t size)
 {
 	size_t oldsize;
 
-	assert(ptr != NULL);
-	assert(size != 0);
+	MOZ_ASSERT(ptr != NULL);
+	MOZ_ASSERT(size != 0);
 
 	oldsize = isalloc(ptr);
 
 	if (size <= arena_maxclass)
 		return (arena_ralloc(ptr, size, oldsize));
 	else
 		return (huge_ralloc(ptr, size, oldsize));
 }
@@ -4392,18 +4386,18 @@ huge_ralloc(void *ptr, size_t size, size
 
 			pages_decommit((void *)((uintptr_t)ptr + psize),
 			    oldsize - psize);
 
 			/* Update recorded size. */
 			malloc_mutex_lock(&huge_mtx);
 			key.addr = const_cast<void*>(ptr);
 			node = extent_tree_ad_search(&huge, &key);
-			assert(node != NULL);
-			assert(node->size == oldsize);
+			MOZ_ASSERT(node != NULL);
+			MOZ_ASSERT(node->size == oldsize);
 			huge_allocated -= oldsize - psize;
 			/* No need to change huge_mapped, because we didn't
 			 * (un)map anything. */
 			node->size = psize;
 			malloc_mutex_unlock(&huge_mtx);
 		} else if (psize > oldsize) {
 			pages_commit((void *)((uintptr_t)ptr + oldsize),
 			    psize - oldsize);
@@ -4417,18 +4411,18 @@ huge_ralloc(void *ptr, size_t size, size
                  * what was requested via realloc(). */
 
                 if (psize > oldsize) {
                         /* Update recorded size. */
                         extent_node_t *node, key;
                         malloc_mutex_lock(&huge_mtx);
                         key.addr = const_cast<void*>(ptr);
                         node = extent_tree_ad_search(&huge, &key);
-                        assert(node != NULL);
-                        assert(node->size == oldsize);
+                        MOZ_ASSERT(node != NULL);
+                        MOZ_ASSERT(node->size == oldsize);
                         huge_allocated += psize - oldsize;
 			/* No need to change huge_mapped, because we didn't
 			 * (un)map anything. */
                         node->size = psize;
                         malloc_mutex_unlock(&huge_mtx);
                 }
 
 		if (opt_zero && size > oldsize) {
@@ -4463,18 +4457,18 @@ huge_dalloc(void *ptr)
 {
 	extent_node_t *node, key;
 
 	malloc_mutex_lock(&huge_mtx);
 
 	/* Extract from tree of huge allocations. */
 	key.addr = ptr;
 	node = extent_tree_ad_search(&huge, &key);
-	assert(node != NULL);
-	assert(node->addr == ptr);
+	MOZ_ASSERT(node != NULL);
+	MOZ_ASSERT(node->addr == ptr);
 	extent_tree_ad_remove(&huge, node);
 
 	huge_ndalloc++;
 	huge_allocated -= node->size;
 	huge_mapped -= CHUNK_CEILING(node->size);
 
 	malloc_mutex_unlock(&huge_mtx);
 
@@ -4657,21 +4651,21 @@ malloc_init_hard(void)
 		SYSTEM_INFO info;
 
 		GetSystemInfo(&info);
 		result = info.dwPageSize;
 
 	}
 #else
 	result = sysconf(_SC_PAGESIZE);
-	assert(result != -1);
+	MOZ_ASSERT(result != -1);
 #endif
 
 	/* We assume that the page size is a power of 2. */
-	assert(((result - 1) & result) == 0);
+	MOZ_ASSERT(((result - 1) & result) == 0);
 #ifdef MALLOC_STATIC_SIZES
 	if (pagesize % (size_t) result) {
 		_malloc_message(_getprogname(),
 				"Compile-time page size does not divide the runtime one.\n",
 				"", "");
 		moz_abort();
 	}
 #else
@@ -4813,30 +4807,30 @@ MALLOC_OUT:
 #ifndef MALLOC_STATIC_SIZES
 	/* Set variables according to the value of opt_small_max_2pow. */
 	if (opt_small_max_2pow < opt_quantum_2pow)
 		opt_small_max_2pow = opt_quantum_2pow;
 	small_max = (1U << opt_small_max_2pow);
 
 	/* Set bin-related variables. */
 	bin_maxclass = (pagesize >> 1);
-	assert(opt_quantum_2pow >= TINY_MIN_2POW);
+	MOZ_ASSERT(opt_quantum_2pow >= TINY_MIN_2POW);
 	ntbins = opt_quantum_2pow - TINY_MIN_2POW;
-	assert(ntbins <= opt_quantum_2pow);
+	MOZ_ASSERT(ntbins <= opt_quantum_2pow);
 	nqbins = (small_max >> opt_quantum_2pow);
 	nsbins = pagesize_2pow - opt_small_max_2pow - 1;
 
 	/* Set variables according to the value of opt_quantum_2pow. */
 	quantum = (1U << opt_quantum_2pow);
 	quantum_mask = quantum - 1;
 	if (ntbins > 0)
 		small_min = (quantum >> 1) + 1;
 	else
 		small_min = 1;
-	assert(small_min <= quantum);
+	MOZ_ASSERT(small_min <= quantum);
 
 	/* Set variables according to the value of opt_chunk_2pow. */
 	chunksize = (1LU << opt_chunk_2pow);
 	chunksize_mask = chunksize - 1;
 	chunk_npages = (chunksize >> pagesize_2pow);
 
 	arena_chunk_header_npages = calculate_arena_header_pages();
 	arena_maxclass = calculate_arena_maxclass();
@@ -4846,25 +4840,25 @@ MALLOC_OUT:
 
 	recycled_size = 0;
 
 #ifdef JEMALLOC_USES_MAP_ALIGN
 	/*
 	 * When using MAP_ALIGN, the alignment parameter must be a power of two
 	 * multiple of the system pagesize, or mmap will fail.
 	 */
-	assert((chunksize % pagesize) == 0);
-	assert((1 << (ffs(chunksize / pagesize) - 1)) == (chunksize/pagesize));
+	MOZ_ASSERT((chunksize % pagesize) == 0);
+	MOZ_ASSERT((1 << (ffs(chunksize / pagesize) - 1)) == (chunksize/pagesize));
 #endif
 
 	/* Various sanity checks that regard configuration. */
-	assert(quantum >= sizeof(void *));
-	assert(quantum <= pagesize);
-	assert(chunksize >= pagesize);
-	assert(quantum * 4 <= chunksize);
+	MOZ_ASSERT(quantum >= sizeof(void *));
+	MOZ_ASSERT(quantum <= pagesize);
+	MOZ_ASSERT(chunksize >= pagesize);
+	MOZ_ASSERT(quantum * 4 <= chunksize);
 
 	/* Initialize chunks data. */
 	malloc_mutex_init(&chunks_mtx);
 	extent_tree_szad_new(&chunks_szad_mmap);
 	extent_tree_ad_new(&chunks_ad_mmap);
 
 	/* Initialize huge allocation data. */
 	malloc_mutex_init(&huge_mtx);
@@ -5002,17 +4996,17 @@ extern "C"
 #define MEMALIGN memalign_impl
 MOZ_MEMORY_API
 #endif
 void *
 MEMALIGN(size_t alignment, size_t size)
 {
 	void *ret;
 
-	assert(((alignment - 1) & alignment) == 0);
+	MOZ_ASSERT(((alignment - 1) & alignment) == 0);
 
 	if (malloc_init()) {
 		ret = NULL;
 		goto RETURN;
 	}
 
 	if (size == 0) {
 		size = 1;
@@ -5139,17 +5133,17 @@ realloc_impl(void *ptr, size_t size)
 {
 	void *ret;
 
 	if (size == 0) {
 		size = 1;
 	}
 
 	if (ptr != NULL) {
-		assert(malloc_initialized);
+		MOZ_ASSERT(malloc_initialized);
 
 		ret = iralloc(ptr, size);
 
 		if (ret == NULL) {
 #ifdef MALLOC_XMALLOC
 			if (opt_xmalloc) {
 				_malloc_message(_getprogname(),
 				    ": (malloc) Error in realloc(): out of "
@@ -5185,17 +5179,17 @@ MOZ_MEMORY_API void
 free_impl(void *ptr)
 {
 	size_t offset;
 
 	/*
 	 * A version of idalloc that checks for NULL pointer but only for
 	 * huge allocations assuming that CHUNK_ADDR2OFFSET(NULL) == 0.
 	 */
-	assert(CHUNK_ADDR2OFFSET(NULL) == 0);
+	MOZ_ASSERT(CHUNK_ADDR2OFFSET(NULL) == 0);
 	offset = CHUNK_ADDR2OFFSET(ptr);
 	if (offset != 0)
 		arena_dalloc(ptr, offset);
 	else if (ptr != NULL)
 		huge_dalloc(ptr);
 }
 
 /*
@@ -5252,17 +5246,17 @@ malloc_usable_size_impl(MALLOC_USABLE_SI
 	return (isalloc_validate(ptr));
 }
 
 MOZ_JEMALLOC_API void
 jemalloc_stats_impl(jemalloc_stats_t *stats)
 {
 	size_t i, non_arena_mapped, chunk_header_size;
 
-	assert(stats != NULL);
+	MOZ_ASSERT(stats != NULL);
 
 	/*
 	 * Gather runtime settings.
 	 */
 	stats->opt_abort = opt_abort;
 	stats->opt_junk = opt_junk;
 	stats->opt_xmalloc =
 #ifdef MALLOC_XMALLOC
@@ -5288,24 +5282,24 @@ jemalloc_stats_impl(jemalloc_stats_t *st
 	stats->bin_unused = 0;
 
 	non_arena_mapped = 0;
 
 	/* Get huge mapped/allocated. */
 	malloc_mutex_lock(&huge_mtx);
 	non_arena_mapped += huge_mapped;
 	stats->allocated += huge_allocated;
-	assert(huge_mapped >= huge_allocated);
+	MOZ_ASSERT(huge_mapped >= huge_allocated);
 	malloc_mutex_unlock(&huge_mtx);
 
 	/* Get base mapped/allocated. */
 	malloc_mutex_lock(&base_mtx);
 	non_arena_mapped += base_mapped;
 	stats->bookkeeping += base_committed;
-	assert(base_mapped >= base_committed);
+	MOZ_ASSERT(base_mapped >= base_committed);
 	malloc_mutex_unlock(&base_mtx);
 
 	malloc_spin_lock(&arenas_lock);
 	/* Iterate over arenas. */
 	for (i = 0; i < narenas; i++) {
 		arena_t *arena = arenas[i];
 		size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
 		    arena_unused, arena_headers;
@@ -5345,18 +5339,18 @@ jemalloc_stats_impl(jemalloc_stats_t *st
 			}
 
 			arena_unused += bin_unused;
 			arena_headers += bin->stats.curruns * bin->reg0_offset;
 		}
 
 		malloc_spin_unlock(&arena->lock);
 
-		assert(arena_mapped >= arena_committed);
-		assert(arena_committed >= arena_allocated + arena_dirty);
+		MOZ_ASSERT(arena_mapped >= arena_committed);
+		MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);
 
 		/* "waste" is committed memory that is neither dirty nor
 		 * allocated. */
 		stats->mapped += arena_mapped;
 		stats->allocated += arena_allocated;
 		stats->page_cache += arena_dirty;
 		stats->waste += arena_committed -
 		    arena_allocated - arena_dirty - arena_unused - arena_headers;
@@ -5369,17 +5363,17 @@ jemalloc_stats_impl(jemalloc_stats_t *st
 	chunk_header_size =
 	    ((stats->mapped / stats->chunksize) * arena_chunk_header_npages) <<
 	    pagesize_2pow;
 
 	stats->mapped += non_arena_mapped;
 	stats->bookkeeping += chunk_header_size;
 	stats->waste -= chunk_header_size;
 
-	assert(stats->mapped >= stats->allocated + stats->waste +
+	MOZ_ASSERT(stats->mapped >= stats->allocated + stats->waste +
 				stats->page_cache + stats->bookkeeping);
 }
 
 #ifdef MALLOC_DOUBLE_PURGE
 
 /* Explicitly remove all of this chunk's MADV_FREE'd pages from memory. */
 static void
 hard_purge_chunk(arena_chunk_t *chunk)
--- a/memory/mozjemalloc/rb.h
+++ b/memory/mozjemalloc/rb.h
@@ -33,21 +33,16 @@
  *
  * Usage:
  *
  *   (Optional.)
  *   #define SIZEOF_PTR ...
  *   #define SIZEOF_PTR_2POW ...
  *   #define RB_NO_C99_VARARRAYS
  *
- *   (Optional, see assert(3).)
- *   #define NDEBUG
- *
- *   (Required.)
- *   #include <assert.h>
  *   #include <rb.h>
  *   ...
  *
  * All operations are done non-recursively.  Parent pointers are not used, and
  * color bits are stored in the least significant bit of right-child pointers,
  * thus making node linkage as compact as is possible for red-black trees.
  *
  * Some macros use a comparison function pointer, which is expected to have the
@@ -162,52 +157,52 @@ struct {								\
 
 #define	rbp_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {	\
     if (rbp_right_get(a_type, a_field, (a_node))			\
       != &(a_tree)->rbt_nil) {						\
 	rbp_first(a_type, a_field, a_tree, rbp_right_get(a_type,	\
 	  a_field, (a_node)), (r_node));				\
     } else {								\
 	a_type *rbp_n_t = (a_tree)->rbt_root;				\
-	assert(rbp_n_t != &(a_tree)->rbt_nil);				\
+	MOZ_ASSERT(rbp_n_t != &(a_tree)->rbt_nil);			\
 	(r_node) = &(a_tree)->rbt_nil;					\
 	while (true) {							\
 	    int rbp_n_cmp = (a_cmp)((a_node), rbp_n_t);			\
 	    if (rbp_n_cmp < 0) {					\
 		(r_node) = rbp_n_t;					\
 		rbp_n_t = rbp_left_get(a_type, a_field, rbp_n_t);	\
 	    } else if (rbp_n_cmp > 0) {					\
 		rbp_n_t = rbp_right_get(a_type, a_field, rbp_n_t);	\
 	    } else {							\
 		break;							\
 	    }								\
-	    assert(rbp_n_t != &(a_tree)->rbt_nil);			\
+	    MOZ_ASSERT(rbp_n_t != &(a_tree)->rbt_nil);			\
 	}								\
     }									\
 } while (0)
 
 #define	rbp_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {	\
     if (rbp_left_get(a_type, a_field, (a_node)) != &(a_tree)->rbt_nil) {\
 	rbp_last(a_type, a_field, a_tree, rbp_left_get(a_type,		\
 	  a_field, (a_node)), (r_node));				\
     } else {								\
 	a_type *rbp_p_t = (a_tree)->rbt_root;				\
-	assert(rbp_p_t != &(a_tree)->rbt_nil);				\
+	MOZ_ASSERT(rbp_p_t != &(a_tree)->rbt_nil);			\
 	(r_node) = &(a_tree)->rbt_nil;					\
 	while (true) {							\
 	    int rbp_p_cmp = (a_cmp)((a_node), rbp_p_t);			\
 	    if (rbp_p_cmp < 0) {					\
 		rbp_p_t = rbp_left_get(a_type, a_field, rbp_p_t);	\
 	    } else if (rbp_p_cmp > 0) {					\
 		(r_node) = rbp_p_t;					\
 		rbp_p_t = rbp_right_get(a_type, a_field, rbp_p_t);	\
 	    } else {							\
 		break;							\
 	    }								\
-	    assert(rbp_p_t != &(a_tree)->rbt_nil);			\
+	    MOZ_ASSERT(rbp_p_t != &(a_tree)->rbt_nil);			\
 	}								\
     }									\
 } while (0)
 
 #define	rb_first(a_type, a_field, a_tree, r_node) do {			\
     rbp_first(a_type, a_field, a_tree, (a_tree)->rbt_root, (r_node));	\
     if ((r_node) == &(a_tree)->rbt_nil) {				\
 	(r_node) = NULL;						\
@@ -400,45 +395,45 @@ struct {								\
 	    rbp_black_set(a_type, a_field, rbp_i_u);			\
 	    if (rbp_left_get(a_type, a_field, rbp_i_p) == rbp_i_c) {	\
 		rbp_left_set(a_type, a_field, rbp_i_p, rbp_i_t);	\
 		rbp_i_c = rbp_i_t;					\
 	    } else {							\
 		/* rbp_i_c was the right child of rbp_i_p, so rotate  */\
 		/* left in order to maintain the left-leaning         */\
 		/* invariant.                                         */\
-		assert(rbp_right_get(a_type, a_field, rbp_i_p)		\
+		MOZ_ASSERT(rbp_right_get(a_type, a_field, rbp_i_p)	\
 		  == rbp_i_c);						\
 		rbp_right_set(a_type, a_field, rbp_i_p, rbp_i_t);	\
 		rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_u);	\
 		if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
 		    rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_u);	\
 		} else {						\
-		    assert(rbp_right_get(a_type, a_field, rbp_i_g)	\
+		    MOZ_ASSERT(rbp_right_get(a_type, a_field, rbp_i_g)	\
 		      == rbp_i_p);					\
 		    rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_u);	\
 		}							\
 		rbp_i_p = rbp_i_u;					\
 		rbp_i_cmp = (a_cmp)((a_node), rbp_i_p);			\
 		if (rbp_i_cmp < 0) {					\
 		    rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_p);	\
 		} else {						\
-		    assert(rbp_i_cmp > 0);				\
+		    MOZ_ASSERT(rbp_i_cmp > 0);				\
 		    rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_p);	\
 		}							\
 		continue;						\
 	    }								\
 	}								\
 	rbp_i_g = rbp_i_p;						\
 	rbp_i_p = rbp_i_c;						\
 	rbp_i_cmp = (a_cmp)((a_node), rbp_i_c);				\
 	if (rbp_i_cmp < 0) {						\
 	    rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_c);		\
 	} else {							\
-	    assert(rbp_i_cmp > 0);					\
+	    MOZ_ASSERT(rbp_i_cmp > 0);					\
 	    rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_c);		\
 	}								\
     }									\
     /* rbp_i_p now refers to the node under which to insert.          */\
     rbp_node_new(a_type, a_field, a_tree, (a_node));			\
     if (rbp_i_cmp > 0) {						\
 	rbp_right_set(a_type, a_field, rbp_i_p, (a_node));		\
 	rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_t);		\
@@ -483,17 +478,17 @@ struct {								\
 	    rbp_r_c = rbp_r_t;						\
 	} else {							\
 	    /* Move left.                                             */\
 	    rbp_r_p = rbp_r_c;						\
 	    rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c);		\
 	}								\
     } else {								\
 	if (rbp_r_cmp == 0) {						\
-	    assert((a_node) == rbp_r_c);				\
+	    MOZ_ASSERT((a_node) == rbp_r_c);				\
 	    if (rbp_right_get(a_type, a_field, rbp_r_c)			\
 	      == &(a_tree)->rbt_nil) {					\
 		/* Delete root node (which is also a leaf node).      */\
 		if (rbp_left_get(a_type, a_field, rbp_r_c)		\
 		  != &(a_tree)->rbt_nil) {				\
 		    rbp_lean_right(a_type, a_field, rbp_r_c, rbp_r_t);	\
 		    rbp_right_set(a_type, a_field, rbp_r_t,		\
 		      &(a_tree)->rbt_nil);				\
@@ -543,46 +538,46 @@ struct {								\
 		/* Move right.                                        */\
 		rbp_r_p = rbp_r_c;					\
 		rbp_r_c = rbp_right_get(a_type, a_field, rbp_r_c);	\
 	    }								\
 	}								\
     }									\
     if (rbp_r_cmp != 0) {						\
 	while (true) {							\
-	    assert(rbp_r_p != &(a_tree)->rbt_nil);			\
+	    MOZ_ASSERT(rbp_r_p != &(a_tree)->rbt_nil);			\
 	    rbp_r_cmp = (a_cmp)((a_node), rbp_r_c);			\
 	    if (rbp_r_cmp < 0) {					\
 		rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c);	\
 		if (rbp_r_t == &(a_tree)->rbt_nil) {			\
 		    /* rbp_r_c now refers to the successor node to    */\
 		    /* relocate, and rbp_r_xp/a_node refer to the     */\
 		    /* context for the relocation.                    */\
 		    if (rbp_left_get(a_type, a_field, rbp_r_xp)		\
 		      == (a_node)) {					\
 			rbp_left_set(a_type, a_field, rbp_r_xp,		\
 			  rbp_r_c);					\
 		    } else {						\
-			assert(rbp_right_get(a_type, a_field,		\
+			MOZ_ASSERT(rbp_right_get(a_type, a_field,	\
 			  rbp_r_xp) == (a_node));			\
 			rbp_right_set(a_type, a_field, rbp_r_xp,	\
 			  rbp_r_c);					\
 		    }							\
 		    rbp_left_set(a_type, a_field, rbp_r_c,		\
 		      rbp_left_get(a_type, a_field, (a_node)));		\
 		    rbp_right_set(a_type, a_field, rbp_r_c,		\
 		      rbp_right_get(a_type, a_field, (a_node)));	\
 		    rbp_color_set(a_type, a_field, rbp_r_c,		\
 		      rbp_red_get(a_type, a_field, (a_node)));		\
 		    if (rbp_left_get(a_type, a_field, rbp_r_p)		\
 		      == rbp_r_c) {					\
 			rbp_left_set(a_type, a_field, rbp_r_p,		\
 			  &(a_tree)->rbt_nil);				\
 		    } else {						\
-			assert(rbp_right_get(a_type, a_field, rbp_r_p)	\
+			MOZ_ASSERT(rbp_right_get(a_type, a_field, rbp_r_p)\
 			  == rbp_r_c);					\
 			rbp_right_set(a_type, a_field, rbp_r_p,		\
 			  &(a_tree)->rbt_nil);				\
 		    }							\
 		    break;						\
 		}							\
 		rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t);	\
 		if (rbp_red_get(a_type, a_field, rbp_r_t) == false	\
@@ -600,17 +595,17 @@ struct {								\
 		} else {						\
 		    rbp_r_p = rbp_r_c;					\
 		    rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c);	\
 		}							\
 	    } else {							\
 		/* Check whether to delete this node (it has to be    */\
 		/* the correct node and a leaf node).                 */\
 		if (rbp_r_cmp == 0) {					\
-		    assert((a_node) == rbp_r_c);			\
+		    MOZ_ASSERT((a_node) == rbp_r_c);			\
 		    if (rbp_right_get(a_type, a_field, rbp_r_c)		\
 		      == &(a_tree)->rbt_nil) {				\
 			/* Delete leaf node.                          */\
 			if (rbp_left_get(a_type, a_field, rbp_r_c)	\
 			  != &(a_tree)->rbt_nil) {			\
 			    rbp_lean_right(a_type, a_field, rbp_r_c,	\
 			      rbp_r_t);					\
 			    rbp_right_set(a_type, a_field, rbp_r_t,	\