Bug 1365460 - Replace MOZ_MEMORY_DEBUG, MALLOC_DEBUG and !MALLOC_PRODUCTION with MOZ_DEBUG. r?njn draft
author: Mike Hommey <mh+mozilla@glandium.org>
Thu, 18 May 2017 10:22:20 +0900
changeset 580753 3c6592e2c5f037657a0861aeed7ed209c08373c5
parent 580752 b372e0747df72c4517efe13fa8855df9604afdfb
child 580754 17fe701cb4a730ac4bcef7c0a2132f97c029e214
push id: 59654
push user: bmo:mh+mozilla@glandium.org
push date: Thu, 18 May 2017 22:44:22 +0000
reviewers: njn
bugs: 1365460
milestone: 55.0a1
Bug 1365460 - Replace MOZ_MEMORY_DEBUG, MALLOC_DEBUG and !MALLOC_PRODUCTION with MOZ_DEBUG. r?njn
js/src/old-configure.in
memory/mozjemalloc/mozjemalloc.cpp
old-configure.in
--- a/js/src/old-configure.in
+++ b/js/src/old-configure.in
@@ -1532,19 +1532,16 @@ Android|WINNT|Darwin)
 *)
   dnl On !Android !Windows !OSX, we only want to link executables against mozglue
   MOZ_GLUE_IN_PROGRAM=1
   AC_DEFINE(MOZ_GLUE_IN_PROGRAM)
   ;;
 esac
 
 if test "$MOZ_MEMORY"; then
-  if test "x$MOZ_DEBUG" = "x1"; then
-    AC_DEFINE(MOZ_MEMORY_DEBUG)
-  fi
   dnl The generic feature tests that determine how to compute ncpus are long and
   dnl complicated.  Therefore, simply define special cpp variables for the
   dnl platforms we have special knowledge of.
   case "${target}" in
   *-mingw*)
     export MOZ_NO_DEBUG_RTL=1
     ;;
   esac
--- a/memory/mozjemalloc/mozjemalloc.cpp
+++ b/memory/mozjemalloc/mozjemalloc.cpp
@@ -127,32 +127,17 @@
  *
  * The jemalloc_purge_freed_pages definition in memory/build/mozmemory.h needs
  * to be adjusted if MALLOC_DOUBLE_PURGE is ever enabled on Linux.
  */
 #ifdef MOZ_MEMORY_DARWIN
 #define MALLOC_DOUBLE_PURGE
 #endif
 
-/*
- * MALLOC_PRODUCTION disables assertions and statistics gathering.  It also
- * defaults the A and J runtime options to off.  These settings are appropriate
- * for production systems.
- */
-#ifndef MOZ_MEMORY_DEBUG
-#  define	MALLOC_PRODUCTION
-#endif
-
-#ifndef MALLOC_PRODUCTION
-   /*
-    * MALLOC_DEBUG enables assertions and other sanity checks, and disables
-    * inline functions.
-    */
-#  define MALLOC_DEBUG
-
+#ifdef MOZ_DEBUG
    /* Support optional abort() on OOM. */
 #  define MALLOC_XMALLOC
 
    /* Support SYSV semantics. */
 #  define MALLOC_SYSV
 #endif
 
 #include <sys/types.h>
@@ -355,17 +340,17 @@ static pthread_key_t tlsIndex;
 #endif
 
 #ifdef MOZ_MEMORY_WINDOWS
    /* MSVC++ does not support C99 variable-length arrays. */
 #  define RB_NO_C99_VARARRAYS
 #endif
 #include "rb.h"
 
-#ifdef MALLOC_DEBUG
+#ifdef MOZ_DEBUG
    /* Disable inlining to make debugging easier. */
 #ifdef inline
 #undef inline
 #endif
 
 #  define inline
 #endif
 
@@ -718,17 +703,17 @@ struct arena_chunk_s {
 
 	/* Map of pages within chunk that keeps track of free/large/small. */
 	arena_chunk_map_t map[1]; /* Dynamically sized. */
 };
 typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
 
 typedef struct arena_run_s arena_run_t;
 struct arena_run_s {
-#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+#if defined(MOZ_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
 	uint32_t	magic;
 #  define ARENA_RUN_MAGIC 0x384adf93
 #endif
 
 	/* Bin this run is associated with. */
 	arena_bin_t	*bin;
 
 	/* Index of first element that might have a free region. */
@@ -772,17 +757,17 @@ struct arena_bin_s {
 	/* Offset of first region in a run for this bin's size class. */
 	uint32_t	reg0_offset;
 
 	/* Bin statistics. */
 	malloc_bin_stats_t stats;
 };
 
 struct arena_s {
-#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+#if defined(MOZ_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
 	uint32_t		magic;
 #  define ARENA_MAGIC 0x947d3d24
 #endif
 
 	/* All operations on this arena require that lock be locked. */
 	malloc_spinlock_t	lock;
 
 	arena_stats_t		stats;
@@ -1060,17 +1045,17 @@ static __thread arena_t	*arenas_map;
 
 /*******************************/
 /*
  * Runtime configuration options.
  */
 const uint8_t kAllocJunk = 0xe4;
 const uint8_t kAllocPoison = 0xe5;
 
-#ifndef MALLOC_PRODUCTION
+#ifdef MOZ_DEBUG
 static bool	opt_abort = true;
 static bool	opt_junk = true;
 static bool	opt_zero = false;
 #else
 static bool	opt_abort = false;
 static const bool	opt_junk = false;
 static const bool	opt_zero = false;
 #endif
@@ -1254,17 +1239,17 @@ static void
 }
 
 #include "mozilla/Assertions.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/TaggedAnonymousMemory.h"
 // Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
 // instead of the one defined here; use only MozTagAnonymousMemory().
 
-#ifdef MALLOC_DEBUG
+#ifdef MOZ_DEBUG
 #  define assert(e) MOZ_ASSERT(e)
 #else
 #  define assert(e)
 #endif
 
 #ifdef MOZ_MEMORY_ANDROID
 // Android's pthread.h does not declare pthread_atfork() until SDK 21.
 extern "C" MOZ_EXPORT
@@ -2055,29 +2040,29 @@ f(malloc_rtree_t *rtree, uintptr_t key)	
 	subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);		\
 	ret = node[subkey];						\
 	MALLOC_RTREE_UNLOCK(&rtree->lock);				\
 									\
 	MALLOC_RTREE_GET_VALIDATE					\
 	return (ret);							\
 }
 
-#ifdef MALLOC_DEBUG
+#ifdef MOZ_DEBUG
 #  define MALLOC_RTREE_LOCK(l)		malloc_spin_lock(l)
 #  define MALLOC_RTREE_UNLOCK(l)	malloc_spin_unlock(l)
 #  define MALLOC_RTREE_GET_VALIDATE
 MALLOC_RTREE_GET_GENERATE(malloc_rtree_get_locked)
 #  undef MALLOC_RTREE_LOCK
 #  undef MALLOC_RTREE_UNLOCK
 #  undef MALLOC_RTREE_GET_VALIDATE
 #endif
 
 #define	MALLOC_RTREE_LOCK(l)
 #define	MALLOC_RTREE_UNLOCK(l)
-#ifdef MALLOC_DEBUG
+#ifdef MOZ_DEBUG
    /*
     * Suppose that it were possible for a jemalloc-allocated chunk to be
     * munmap()ped, followed by a different allocator in another thread re-using
     * overlapping virtual memory, all without invalidating the cached rtree
     * value.  The result would be a false positive (the rtree would claim that
     * jemalloc owns memory that it had actually discarded).  I don't think this
     * scenario is possible, but the following assertion is a prudent sanity
     * check.
@@ -3092,17 +3077,17 @@ arena_run_alloc(arena_t *arena, arena_bi
 
 static void
 arena_purge(arena_t *arena, bool all)
 {
 	arena_chunk_t *chunk;
 	size_t i, npages;
 	/* If all is set purge all dirty pages. */
 	size_t dirty_max = all ? 1 : opt_dirty_max;
-#ifdef MALLOC_DEBUG
+#ifdef MOZ_DEBUG
 	size_t ndirty = 0;
 	rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
 	    chunk) {
 		ndirty += chunk->ndirty;
 	} rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
 	assert(ndirty == arena->ndirty);
 #endif
 	RELEASE_ASSERT(all || (arena->ndirty > opt_dirty_max));
@@ -3378,17 +3363,17 @@ arena_bin_nonfull_run_get(arena_t *arena
 		/* The last element has spare bits that need to be unset. */
 		run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
 		    - remainder));
 	}
 
 	run->regs_minelm = 0;
 
 	run->nfree = bin->nregs;
-#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+#if defined(MOZ_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
 	run->magic = ARENA_RUN_MAGIC;
 #endif
 
 	bin->stats.nruns++;
 	bin->stats.curruns++;
 	if (bin->stats.curruns > bin->stats.highruns)
 		bin->stats.highruns = bin->stats.curruns;
 	return (run);
@@ -3912,17 +3897,17 @@ arena_dalloc_small(arena_t *arena, arena
 			 * This block's conditional is necessary because if the
 			 * run only contains one region, then it never gets
 			 * inserted into the non-full runs tree.
 			 */
 			RELEASE_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
 				run_mapelm);
 			arena_run_tree_remove(&bin->runs, run_mapelm);
 		}
-#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+#if defined(MOZ_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
 		run->magic = 0;
 #endif
 		arena_run_dalloc(arena, run, true);
 		bin->stats.curruns--;
 	} else if (run->nfree == 1 && run != bin->runcur) {
 		/*
 		 * Make sure that bin->runcur always refers to the lowest
 		 * non-full run, if one exists.
@@ -4252,17 +4237,17 @@ arena_new(arena_t *arena)
 
 		bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
 
 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
 	}
 
-#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+#if defined(MOZ_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
 	arena->magic = ARENA_MAGIC;
 #endif
 
 	return (false);
 }
 
 static inline arena_t *
 arenas_fallback()
@@ -4774,17 +4759,17 @@ MALLOC_OUT:
 					opt_dirty_max >>= 1;
 					break;
 				case 'F':
 					if (opt_dirty_max == 0)
 						opt_dirty_max = 1;
 					else if ((opt_dirty_max << 1) != 0)
 						opt_dirty_max <<= 1;
 					break;
-#ifndef MALLOC_PRODUCTION
+#ifdef MOZ_DEBUG
 				case 'j':
 					opt_junk = false;
 					break;
 				case 'J':
 					opt_junk = true;
 					break;
 #endif
 #ifndef MALLOC_STATIC_SIZES
@@ -4841,17 +4826,17 @@ MALLOC_OUT:
 #ifdef MALLOC_XMALLOC
 				case 'x':
 					opt_xmalloc = false;
 					break;
 				case 'X':
 					opt_xmalloc = true;
 					break;
 #endif
-#ifndef MALLOC_PRODUCTION
+#ifdef MOZ_DEBUG
 				case 'z':
 					opt_zero = false;
 					break;
 				case 'Z':
 					opt_zero = true;
 					break;
 #endif
 				default: {
--- a/old-configure.in
+++ b/old-configure.in
@@ -4127,19 +4127,16 @@ if test -z "$MOZ_MEMORY"; then
   case "${target}" in
     *-mingw*)
       if test -z "$WIN32_REDIST_DIR" -a -z "$MOZ_DEBUG"; then
         AC_MSG_WARN([When not building jemalloc, you need to set WIN32_REDIST_DIR to the path to the Visual C++ Redist (usually VCINSTALLDIR\redist\x86\Microsoft.VC80.CRT, for VC++ v8) if you intend to distribute your build.])
       fi
       ;;
   esac
 else
-  if test "x$MOZ_DEBUG" = "x1"; then
-    AC_DEFINE(MOZ_MEMORY_DEBUG)
-  fi
   dnl The generic feature tests that determine how to compute ncpus are long and
   dnl complicated.  Therefore, simply define special cpp variables for the
   dnl platforms we have special knowledge of.
   case "${target}" in
   *-mingw*)
     export MOZ_NO_DEBUG_RTL=1
     ;;
   esac