Bug 1365194 - Call moz_abort directly instead of using a macro to override abort. r?njn draft
authorMike Hommey <mh+mozilla@glandium.org>
Fri, 12 May 2017 21:51:16 +0900
changeset 579107 52f424b3bad91fd7c50d314e367a6299bcd92a48
parent 579106 e8ce6ef9cc78e3773c7a0253a50d5c955500fc5f
child 579108 b8feda96500aced1862648e3b6ff42bf4cae1ad3
push id 59148
push user bmo:mh+mozilla@glandium.org
push date Wed, 17 May 2017 00:43:10 +0000
reviewers njn
bugs 1365194
milestone 55.0a1
Bug 1365194 - Call moz_abort directly instead of using a macro to override abort. r?njn Using -Dabort=moz_abort actually makes the build fail in some libstdc++ headers when building as C++.
memory/mozjemalloc/jemalloc.c
memory/mozjemalloc/moz.build
--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -303,16 +303,18 @@ typedef long ssize_t;
 #endif
 
 #endif
 
 #include "jemalloc_types.h"
 #include "linkedlist.h"
 #include "mozmemory_wrap.h"
 
+extern void moz_abort();
+
 /* Some tools, such as /dev/dsp wrappers, LD_PRELOAD libraries that
  * happen to override mmap() and call dlsym() from their overridden
  * mmap(). The problem is that dlsym() calls malloc(), and this ends
  * up in a dead lock in jemalloc.
  * On these systems, we prefer to directly use the system call.
  * We do that for Linux systems and kfreebsd with GNU userland.
  * Note sanity checks are not done (alignment of offset, ...) because
  * the uses of mmap are pretty limited, in jemalloc.
@@ -1515,25 +1517,25 @@ pages_decommit(void *addr, size_t size)
 	* to VirtualAlloc and recycled, so decommitting the entire region in one
 	* go may not be valid. However, since we allocate at least a chunk at a
 	* time, we may touch any region in chunksized increments.
 	*/
 	size_t pages_size = min(size, chunksize -
 		CHUNK_ADDR2OFFSET((uintptr_t)addr));
 	while (size > 0) {
 		if (!VirtualFree(addr, pages_size, MEM_DECOMMIT))
-			abort();
+			moz_abort();
 		addr = (void *)((uintptr_t)addr + pages_size);
 		size -= pages_size;
 		pages_size = min(size, chunksize);
 	}
 #else
 	if (mmap(addr, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1,
 	    0) == MAP_FAILED)
-		abort();
+		moz_abort();
 	MozTagAnonymousMemory(addr, size, "jemalloc-decommitted");
 #endif
 }
 
 static inline void
 pages_commit(void *addr, size_t size)
 {
 
@@ -1543,25 +1545,25 @@ pages_commit(void *addr, size_t size)
 	* to VirtualAlloc and recycled, so committing the entire region in one
 	* go may not be valid. However, since we allocate at least a chunk at a
 	* time, we may touch any region in chunksized increments.
 	*/
 	size_t pages_size = min(size, chunksize -
 		CHUNK_ADDR2OFFSET((uintptr_t)addr));
 	while (size > 0) {
 		if (!VirtualAlloc(addr, pages_size, MEM_COMMIT, PAGE_READWRITE))
-			abort();
+			moz_abort();
 		addr = (void *)((uintptr_t)addr + pages_size);
 		size -= pages_size;
 		pages_size = min(size, chunksize);
 	}
 #  else
 	if (mmap(addr, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE |
 	    MAP_ANON, -1, 0) == MAP_FAILED)
-		abort();
+		moz_abort();
 	MozTagAnonymousMemory(addr, size, "jemalloc");
 #  endif
 }
 
 static bool
 base_pages_alloc(size_t minsize)
 {
 	size_t csize;
@@ -1844,17 +1846,17 @@ pages_map(void *addr, size_t size)
 
 static void
 pages_unmap(void *addr, size_t size)
 {
 	if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
 		_malloc_message(_getprogname(),
 		    ": (malloc) Error in VirtualFree()\n", "", "");
 		if (opt_abort)
-			abort();
+			moz_abort();
 	}
 }
 #else
 #ifdef JEMALLOC_USES_MAP_ALIGN
 static void *
 pages_map_align(size_t size, size_t alignment)
 {
 	void *ret;
@@ -1953,17 +1955,17 @@ pages_map(void *addr, size_t size)
 		if (munmap(ret, size) == -1) {
 			char buf[STRERROR_BUF];
 
 			if (strerror_r(errno, buf, sizeof(buf)) == 0) {
 				_malloc_message(_getprogname(),
 					": (malloc) Error in munmap(): ", buf, "\n");
 			}
 			if (opt_abort)
-				abort();
+				moz_abort();
 		}
 		ret = NULL;
 	}
 	if (ret != NULL) {
 		MozTagAnonymousMemory(ret, size, "jemalloc");
 	}
 
 #if defined(__ia64__) || (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
@@ -1983,17 +1985,17 @@ pages_unmap(void *addr, size_t size)
 	if (munmap(addr, size) == -1) {
 		char buf[STRERROR_BUF];
 
 		if (strerror_r(errno, buf, sizeof(buf)) == 0) {
 			_malloc_message(_getprogname(),
 				": (malloc) Error in munmap(): ", buf, "\n");
 		}
 		if (opt_abort)
-			abort();
+			moz_abort();
 	}
 }
 #endif
 
 #ifdef MOZ_MEMORY_DARWIN
 #define	VM_COPY_MIN (pagesize << 5)
 static inline void
 pages_copy(void *dest, const void *src, size_t n)
@@ -4300,17 +4302,17 @@ arenas_fallback()
 	 * OOM here is quite inconvenient to propagate, since dealing with it
 	 * would require a check for failure in the fast path.  Instead, punt
 	 * by using arenas[0].
 	 * In practice, this is an extremely unlikely failure.
 	 */
 	_malloc_message(_getprogname(),
 	    ": (malloc) Error initializing arena\n", "", "");
 	if (opt_abort)
-		abort();
+		moz_abort();
 
 	return arenas[0];
 }
 
 /* Create a new arena and return it. */
 static arena_t *
 arenas_extend()
 {
@@ -4755,17 +4757,17 @@ malloc_init_hard(void)
 
 	/* We assume that the page size is a power of 2. */
 	assert(((result - 1) & result) == 0);
 #ifdef MALLOC_STATIC_SIZES
 	if (pagesize % (size_t) result) {
 		_malloc_message(_getprogname(),
 				"Compile-time page size does not divide the runtime one.\n",
 				"", "");
-		abort();
+		moz_abort();
 	}
 #else
 	pagesize = (size_t) result;
 	pagesize_mask = (size_t) result - 1;
 	pagesize_2pow = ffs((int)result) - 1;
 #endif
 
 	for (i = 0; i < 3; i++) {
@@ -5131,17 +5133,17 @@ malloc_impl(size_t size)
 
 RETURN:
 	if (ret == NULL) {
 #ifdef MALLOC_XMALLOC
 		if (opt_xmalloc) {
 			_malloc_message(_getprogname(),
 			    ": (malloc) Error in malloc(): out of memory\n", "",
 			    "");
-			abort();
+			moz_abort();
 		}
 #endif
 		errno = ENOMEM;
 	}
 
 	return (ret);
 }
 
@@ -5213,17 +5215,17 @@ MEMALIGN(size_t alignment, size_t size)
 	alignment = alignment < sizeof(void*) ? sizeof(void*) : alignment;
 	ret = ipalloc(alignment, size);
 
 RETURN:
 #ifdef MALLOC_XMALLOC
 	if (opt_xmalloc && ret == NULL) {
 		_malloc_message(_getprogname(),
 		": (malloc) Error in memalign(): out of memory\n", "", "");
-		abort();
+		moz_abort();
 	}
 #endif
 	return (ret);
 }
 
 #ifdef MOZ_MEMORY_ELF
 extern void *
 memalign_impl(size_t alignment, size_t size) __attribute__((alias ("memalign_internal"), visibility ("default")));
@@ -5236,17 +5238,17 @@ posix_memalign_impl(void **memptr, size_
 
 	/* Make sure that alignment is a large enough power of 2. */
 	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *)) {
 #ifdef MALLOC_XMALLOC
 		if (opt_xmalloc) {
 			_malloc_message(_getprogname(),
 			    ": (malloc) Error in posix_memalign(): "
 			    "invalid alignment\n", "", "");
-			abort();
+			moz_abort();
 		}
 #endif
 		return (EINVAL);
 	}
 
 	/* The 0-->1 size promotion is done in the memalign() call below */
 
 	result = MEMALIGN(alignment, size);
@@ -5262,17 +5264,17 @@ MOZ_MEMORY_API void *
 aligned_alloc_impl(size_t alignment, size_t size)
 {
 	if (size % alignment) {
 #ifdef MALLOC_XMALLOC
 		if (opt_xmalloc) {
 			_malloc_message(_getprogname(),
 			    ": (malloc) Error in aligned_alloc(): "
 			    "size is not multiple of alignment\n", "", "");
-			abort();
+			moz_abort();
 		}
 #endif
 		return (NULL);
 	}
 	return MEMALIGN(alignment, size);
 }
 
 MOZ_MEMORY_API void *
@@ -5321,17 +5323,17 @@ calloc_impl(size_t num, size_t size)
 
 RETURN:
 	if (ret == NULL) {
 #ifdef MALLOC_XMALLOC
 		if (opt_xmalloc) {
 			_malloc_message(_getprogname(),
 			    ": (malloc) Error in calloc(): out of memory\n", "",
 			    "");
-			abort();
+			moz_abort();
 		}
 #endif
 		errno = ENOMEM;
 	}
 
 	return (ret);
 }
 
@@ -5361,34 +5363,34 @@ realloc_impl(void *ptr, size_t size)
 		ret = iralloc(ptr, size);
 
 		if (ret == NULL) {
 #ifdef MALLOC_XMALLOC
 			if (opt_xmalloc) {
 				_malloc_message(_getprogname(),
 				    ": (malloc) Error in realloc(): out of "
 				    "memory\n", "", "");
-				abort();
+				moz_abort();
 			}
 #endif
 			errno = ENOMEM;
 		}
 	} else {
 		if (malloc_init())
 			ret = NULL;
 		else
 			ret = imalloc(size);
 
 		if (ret == NULL) {
 #ifdef MALLOC_XMALLOC
 			if (opt_xmalloc) {
 				_malloc_message(_getprogname(),
 				    ": (malloc) Error in realloc(): out of "
 				    "memory\n", "", "");
-				abort();
+				moz_abort();
 			}
 #endif
 			errno = ENOMEM;
 		}
 	}
 
 #ifdef MALLOC_SYSV
 RETURN:
@@ -5831,17 +5833,17 @@ void
 
 #if defined(MOZ_MEMORY_DARWIN)
 
 __attribute__((constructor))
 void
 jemalloc_darwin_init(void)
 {
 	if (malloc_init_hard())
-		abort();
+		moz_abort();
 }
 
 #endif
 
 /*
  * is_malloc(malloc_impl) is some macro magic to detect if malloc_impl is
  * defined as "malloc" in mozmemory_wrap.h
  */
--- a/memory/mozjemalloc/moz.build
+++ b/memory/mozjemalloc/moz.build
@@ -13,18 +13,16 @@ SOURCES += [
 ]
 FINAL_LIBRARY = 'memory'
 
 # For non release/esr builds, enable (some) fatal jemalloc assertions.  This
 # helps us catch memory errors.
 if CONFIG['MOZ_UPDATE_CHANNEL'] not in ('release', 'esr'):
     DEFINES['MOZ_JEMALLOC_HARD_ASSERTS'] = True
 
-DEFINES['abort'] = 'moz_abort'
-
 DEFINES['MOZ_JEMALLOC_IMPL'] = True
 
 LOCAL_INCLUDES += [
     '/memory/build',
 ]
 
 if CONFIG['GNU_CC']:
     CFLAGS += ['-Wno-unused'] # too many annoying warnings from mfbt/ headers