Bug 1365191 - Remove !MOZ_MEMORY sections in mozjemalloc. r?njn (draft)
author Mike Hommey <mh+mozilla@glandium.org>
Thu, 11 May 2017 17:34:54 +0900
changeset 578670 5ea30f5ac177f881a8242b729c9a0fe814ab3f81
parent 578669 ddea2ee237e55db891acb79842f56c228d1bcdc8
child 578671 b53ee79d34ad4576b1fa398228a3485148a14043
push id 59012
push user bmo:mh+mozilla@glandium.org
push date Tue, 16 May 2017 09:18:36 +0000
reviewers njn
bugs 1365191
milestone 55.0a1
Bug 1365191 - Remove !MOZ_MEMORY sections in mozjemalloc. r?njn

MOZ_MEMORY is always defined when building mozjemalloc, so the !MOZ_MEMORY sections are dead code. Due to the origin of the code, those sections were all FreeBSD-specific; if we want to add FreeBSD support, we will probably need to reintroduce some of them, but I'd rather avoid maintaining a difference between FreeBSD and other POSIX systems if we can.
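After this change, the lock-type selection collapses to a three-way platform split: CRITICAL_SECTION on Windows, OSSpinLock on Darwin, and plain pthread mutexes everywhere else. Condensed from the hunks below (a sketch, not the literal file contents; the Windows malloc_mutex_t define is not visible in this diff but follows the same pattern):

    #if defined(MOZ_MEMORY_WINDOWS)
    #define malloc_spinlock_t CRITICAL_SECTION	/* malloc_mutex_t likewise */
    #elif defined(MOZ_MEMORY_DARWIN)
    typedef struct {
    	OSSpinLock	lock;
    } malloc_mutex_t;
    typedef struct {
    	OSSpinLock	lock;
    } malloc_spinlock_t;
    #else
    typedef pthread_mutex_t malloc_mutex_t;
    typedef pthread_mutex_t malloc_spinlock_t;
    #endif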
memory/mozjemalloc/jemalloc.c
--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -277,62 +277,37 @@ typedef long ssize_t;
 
 #ifndef MOZ_MEMORY_WINDOWS
 #ifndef MOZ_MEMORY_SOLARIS
 #include <sys/cdefs.h>
 #endif
 #ifndef __DECONST
 #  define __DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
 #endif
-#ifndef MOZ_MEMORY
-__FBSDID("$FreeBSD: head/lib/libc/stdlib/malloc.c 180599 2008-07-18 19:35:44Z jasone $");
-#include "libc_private.h"
-#ifdef MALLOC_DEBUG
-#  define _LOCK_DEBUG
-#endif
-#include "spinlock.h"
-#include "namespace.h"
-#endif
 #include <sys/mman.h>
 #ifndef MADV_FREE
 #  define MADV_FREE	MADV_DONTNEED
 #endif
 #ifndef MAP_NOSYNC
 #  define MAP_NOSYNC	0
 #endif
 #include <sys/param.h>
-#ifndef MOZ_MEMORY
-#include <sys/stddef.h>
-#endif
 #include <sys/time.h>
 #include <sys/types.h>
 #if !defined(MOZ_MEMORY_SOLARIS) && !defined(MOZ_MEMORY_ANDROID)
 #include <sys/sysctl.h>
 #endif
 #include <sys/uio.h>
-#ifndef MOZ_MEMORY
-#include <sys/ktrace.h> /* Must come after several other sys/ includes. */
-
-#include <machine/atomic.h>
-#include <machine/cpufunc.h>
-#include <machine/vmparam.h>
-#endif
 
 #include <errno.h>
 #include <limits.h>
 #ifndef SIZE_T_MAX
 #  define SIZE_T_MAX	SIZE_MAX
 #endif
 #include <pthread.h>
-#ifdef MOZ_MEMORY_DARWIN
-#define _pthread_mutex_init pthread_mutex_init
-#define _pthread_mutex_trylock pthread_mutex_trylock
-#define _pthread_mutex_lock pthread_mutex_lock
-#define _pthread_mutex_unlock pthread_mutex_unlock
-#endif
 #include <sched.h>
 #include <stdarg.h>
 #include <stdio.h>
 #include <stdbool.h>
 #include <stdint.h>
 #include <stdlib.h>
 #include <string.h>
 #ifndef MOZ_MEMORY_DARWIN
@@ -343,20 +318,16 @@ typedef long ssize_t;
 #ifdef MOZ_MEMORY_DARWIN
 #include <libkern/OSAtomic.h>
 #include <mach/mach_error.h>
 #include <mach/mach_init.h>
 #include <mach/vm_map.h>
 #include <malloc/malloc.h>
 #endif
 
-#ifndef MOZ_MEMORY
-#include "un-namespace.h"
-#endif
-
 #endif
 
 #include "jemalloc_types.h"
 #include "linkedlist.h"
 #include "mozmemory_wrap.h"
 
 /* Some tools, such as /dev/dsp wrappers, LD_PRELOAD libraries that
  * happen to override mmap() and call dlsym() from their overridden
@@ -507,39 +478,16 @@ static pthread_key_t tlsIndex;
  *
  *   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP))
  */
 #define	RUN_BFP			12
 /*                                    \/   Implicit binary fixed point. */
 #define	RUN_MAX_OVRHD		0x0000003dU
 #define	RUN_MAX_OVRHD_RELAX	0x00001800U
 
-/*
- * Hyper-threaded CPUs may need a special instruction inside spin loops in
- * order to yield to another virtual CPU.  If no such instruction is defined
- * above, make CPU_SPINWAIT a no-op.
- */
-#ifndef CPU_SPINWAIT
-#  define CPU_SPINWAIT
-#endif
-
-/*
- * Adaptive spinning must eventually switch to blocking, in order to avoid the
- * potential for priority inversion deadlock.  Backing off past a certain point
- * can actually waste time.
- */
-#define	SPIN_LIMIT_2POW		11
-
-/*
- * Conversion from spinning to blocking is expensive; we use (1U <<
- * BLOCK_COST_2POW) to estimate how many more times costly blocking is than
- * worst-case spinning.
- */
-#define	BLOCK_COST_2POW		4
-
 /******************************************************************************/
 
 /* MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive. */
 #if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
 #error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #endif
 
 /*
@@ -552,40 +500,32 @@ static pthread_key_t tlsIndex;
 #define malloc_spinlock_t CRITICAL_SECTION
 #elif defined(MOZ_MEMORY_DARWIN)
 typedef struct {
 	OSSpinLock	lock;
 } malloc_mutex_t;
 typedef struct {
 	OSSpinLock	lock;
 } malloc_spinlock_t;
-#elif defined(MOZ_MEMORY)
+#else
 typedef pthread_mutex_t malloc_mutex_t;
 typedef pthread_mutex_t malloc_spinlock_t;
-#else
-/* XXX these should #ifdef these for freebsd (and linux?) only */
-typedef struct {
-	spinlock_t	lock;
-} malloc_mutex_t;
-typedef malloc_spinlock_t malloc_mutex_t;
 #endif
 
 /* Set to true once the allocator has been initialized. */
 static bool malloc_initialized = false;
 
 #if defined(MOZ_MEMORY_WINDOWS)
 /* No init lock for Windows. */
 #elif defined(MOZ_MEMORY_DARWIN)
 static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
 #elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
 static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
-#elif defined(MOZ_MEMORY)
+#else
 static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
-#else
-static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
 #endif
 
 /******************************************************************************/
 /*
  * Statistics data structures.
  */
 
 #ifdef MALLOC_STATS
@@ -882,21 +822,17 @@ struct arena_bin_s {
 
 struct arena_s {
 #if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
 	uint32_t		magic;
 #  define ARENA_MAGIC 0x947d3d24
 #endif
 
 	/* All operations on this arena require that lock be locked. */
-#ifdef MOZ_MEMORY
 	malloc_spinlock_t	lock;
-#else
-	pthread_mutex_t		lock;
-#endif
 
 #ifdef MALLOC_STATS
 	arena_stats_t		stats;
 #endif
 
 	/* Tree of dirty-page-containing chunks this arena manages. */
 	arena_chunk_tree_t	chunks_dirty;
 
@@ -1159,21 +1095,17 @@ static size_t		base_committed;
  */
 
 /*
  * Arenas that are used to service external requests.  Not all elements of the
  * arenas array are necessarily used; arenas are created lazily as needed.
  */
 static arena_t		**arenas;
 static unsigned		narenas;
-#ifdef MOZ_MEMORY
 static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
-#else
-static pthread_mutex_t arenas_lock; /* Protects arenas initialization. */
-#endif
 
 #ifndef NO_TLS
 /*
  * Map of pthread_self() --> arenas[???], used for selecting an arena to use
  * for allocations.
  */
 #ifndef MOZ_MEMORY_WINDOWS
 static __thread arena_t	*arenas_map;
@@ -1391,17 +1323,17 @@ umax2s(uintmax_t x, unsigned base, char 
 	}
 
 	return (&s[i]);
 }
 
 static void
 wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
 {
-#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_WINDOWS)
+#if !defined(MOZ_MEMORY_WINDOWS)
 #define	_write	write
 #endif
 	// Pretend to check _write() errors to suppress gcc warnings about
 	// warn_unused_result annotations in some versions of glibc headers.
 	if (_write(STDERR_FILENO, p1, (unsigned int) strlen(p1)) < 0)
 		return;
 	if (_write(STDERR_FILENO, p2, (unsigned int) strlen(p2)) < 0)
 		return;
@@ -1463,54 +1395,46 @@ malloc_mutex_init(malloc_mutex_t *mutex)
 	if (pthread_mutexattr_init(&attr) != 0)
 		return (true);
 	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 	if (pthread_mutex_init(mutex, &attr) != 0) {
 		pthread_mutexattr_destroy(&attr);
 		return (true);
 	}
 	pthread_mutexattr_destroy(&attr);
-#elif defined(MOZ_MEMORY)
+#else
 	if (pthread_mutex_init(mutex, NULL) != 0)
 		return (true);
-#else
-	static const spinlock_t lock = _SPINLOCK_INITIALIZER;
-
-	mutex->lock = lock;
 #endif
 	return (false);
 }
 
 static inline void
 malloc_mutex_lock(malloc_mutex_t *mutex)
 {
 
 #if defined(MOZ_MEMORY_WINDOWS)
 	EnterCriticalSection(mutex);
 #elif defined(MOZ_MEMORY_DARWIN)
 	OSSpinLockLock(&mutex->lock);
-#elif defined(MOZ_MEMORY)
+#else
 	pthread_mutex_lock(mutex);
-#else
-	_SPINLOCK(&mutex->lock);
 #endif
 }
 
 static inline void
 malloc_mutex_unlock(malloc_mutex_t *mutex)
 {
 
 #if defined(MOZ_MEMORY_WINDOWS)
 	LeaveCriticalSection(mutex);
 #elif defined(MOZ_MEMORY_DARWIN)
 	OSSpinLockUnlock(&mutex->lock);
-#elif defined(MOZ_MEMORY)
+#else
 	pthread_mutex_unlock(mutex);
-#else
-	_SPINUNLOCK(&mutex->lock);
 #endif
 }
 
 #if (defined(__GNUC__))
 __attribute__((unused))
 #  endif
 static bool
 malloc_spin_init(malloc_spinlock_t *lock)
@@ -1525,139 +1449,64 @@ malloc_spin_init(malloc_spinlock_t *lock
 	if (pthread_mutexattr_init(&attr) != 0)
 		return (true);
 	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 	if (pthread_mutex_init(lock, &attr) != 0) {
 		pthread_mutexattr_destroy(&attr);
 		return (true);
 	}
 	pthread_mutexattr_destroy(&attr);
-#elif defined(MOZ_MEMORY)
+#else
 	if (pthread_mutex_init(lock, NULL) != 0)
 		return (true);
-#else
-	lock->lock = _SPINLOCK_INITIALIZER;
 #endif
 	return (false);
 }
 
 static inline void
 malloc_spin_lock(malloc_spinlock_t *lock)
 {
 
 #if defined(MOZ_MEMORY_WINDOWS)
 	EnterCriticalSection(lock);
 #elif defined(MOZ_MEMORY_DARWIN)
 	OSSpinLockLock(&lock->lock);
-#elif defined(MOZ_MEMORY)
+#else
 	pthread_mutex_lock(lock);
-#else
-	_SPINLOCK(&lock->lock);
 #endif
 }
 
 static inline void
 malloc_spin_unlock(malloc_spinlock_t *lock)
 {
 #if defined(MOZ_MEMORY_WINDOWS)
 	LeaveCriticalSection(lock);
 #elif defined(MOZ_MEMORY_DARWIN)
 	OSSpinLockUnlock(&lock->lock);
-#elif defined(MOZ_MEMORY)
+#else
 	pthread_mutex_unlock(lock);
-#else
-	_SPINUNLOCK(&lock->lock);
 #endif
 }
 
 /*
  * End mutex.
  */
 /******************************************************************************/
 /*
  * Begin spin lock.  Spin locks here are actually adaptive mutexes that block
  * after a period of spinning, because unbounded spinning would allow for
  * priority inversion.
  */
 
-#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_DARWIN)
+#if !defined(MOZ_MEMORY_DARWIN)
 #  define	malloc_spin_init	malloc_mutex_init
 #  define	malloc_spin_lock	malloc_mutex_lock
 #  define	malloc_spin_unlock	malloc_mutex_unlock
 #endif
 
-#ifndef MOZ_MEMORY
-/*
- * We use an unpublished interface to initialize pthread mutexes with an
- * allocation callback, in order to avoid infinite recursion.
- */
-int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
-    void *(calloc_cb)(size_t, size_t));
-
-__weak_reference(_pthread_mutex_init_calloc_cb_stub,
-    _pthread_mutex_init_calloc_cb);
-
-int
-_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
-    void *(calloc_cb)(size_t, size_t))
-{
-
-	return (0);
-}
-
-static bool
-malloc_spin_init(pthread_mutex_t *lock)
-{
-
-	if (_pthread_mutex_init_calloc_cb(lock, base_calloc) != 0)
-		return (true);
-
-	return (false);
-}
-
-static inline unsigned
-malloc_spin_lock(pthread_mutex_t *lock)
-{
-	unsigned ret = 0;
-
-	if (_pthread_mutex_trylock(lock) != 0) {
-		unsigned i;
-		volatile unsigned j;
-
-		/* Exponentially back off. */
-		for (i = 1; i <= SPIN_LIMIT_2POW; i++) {
-			for (j = 0; j < (1U << i); j++)
-				ret++;
-
-			CPU_SPINWAIT;
-			if (_pthread_mutex_trylock(lock) == 0)
-				return (ret);
-		}
-
-		/*
-		 * Spinning failed.  Block until the lock becomes
-		 * available, in order to avoid indefinite priority
-		 * inversion.
-		 */
-		_pthread_mutex_lock(lock);
-		assert((ret << BLOCK_COST_2POW) != 0);
-		return (ret << BLOCK_COST_2POW);
-	}
-
-	return (ret);
-}
-
-static inline void
-malloc_spin_unlock(pthread_mutex_t *lock)
-{
-
-	_pthread_mutex_unlock(lock);
-}
-#endif
-
 /*
  * End spin lock.
  */
 /******************************************************************************/
 /*
  * Begin Utility functions/macros.
  */