Bug 1153683 - mozjemalloc: use spinlocks on FreeBSD to avoid recursion. (draft)

author      Jan Beich <jbeich@FreeBSD.org>
date        Fri, 12 May 2017 19:36:07 +0000
changeset   577122 239b2330c70bf153a33d35999e574814018f2ecf
parent      577084 96b36c5f527dd42e680a230839519eee1fc2c9f3
child       577123 9e248353d69612c6d861b0e87959e87de1154137
push id     58619
push user   bmo:jbeich@FreeBSD.org
push date   Fri, 12 May 2017 21:48:08 +0000
bugs        1153683
milestone   55.0a1

Bug 1153683 - mozjemalloc: use spinlocks on FreeBSD to avoid recursion.
MozReview-Commit-ID: LaHYKUAnNh9

files:
  memory/mozjemalloc/jemalloc.c
  memory/mozjemalloc/spinlock.h
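Why this change is needed: on FreeBSD, pthread_mutex_t is a pointer, and pthread_mutex_init() obtains the underlying structure from calloc(). When mozjemalloc is the system allocator, creating its own pthread mutexes therefore re-enters the allocator before initialization has finished. The patch breaks the cycle in two ways: the bootstrap mutexes (malloc_mutex_t, including init_lock) become libc spinlocks, which need no allocation, while the per-arena "spin" locks remain pthread mutexes but are created through libthr's _pthread_mutex_init_calloc_cb(), which takes the mutex allocation from a caller-supplied callback instead of the public calloc(). An illustrative sketch of the call chain being avoided (not actual FreeBSD source):

/*
 * malloc(3)                       first allocation in the process
 *   -> malloc_init()
 *     -> pthread_mutex_init()     FreeBSD mutexes live on the heap...
 *       -> calloc(3)              ...so libthr asks the allocator for one
 *         -> malloc_init()        re-entered before its locks are usable
 */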
--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -304,25 +304,31 @@ typedef long ssize_t;
 
 #ifndef MOZ_MEMORY_WINDOWS
 #ifndef MOZ_MEMORY_SOLARIS
 #include <sys/cdefs.h>
 #endif
 #ifndef __DECONST
 #  define __DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
 #endif
-#ifndef MOZ_MEMORY
+#ifdef __FreeBSD__
 __FBSDID("$FreeBSD: head/lib/libc/stdlib/malloc.c 180599 2008-07-18 19:35:44Z jasone $");
+#ifndef MOZ_MEMORY
 #include "libc_private.h"
+#endif
 #ifdef MALLOC_DEBUG
 #  define _LOCK_DEBUG
 #endif
+_Pragma("GCC visibility push(default)")
 #include "spinlock.h"
+_Pragma("GCC visibility pop")
+#ifndef MOZ_MEMORY
 #include "namespace.h"
 #endif
+#endif /* __FreeBSD__ */
 #include <sys/mman.h>
 #ifndef MADV_FREE
 #  define MADV_FREE	MADV_DONTNEED
 #endif
 #ifndef MAP_NOSYNC
 #  define MAP_NOSYNC	0
 #endif
 #include <sys/param.h>
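Note on the hunk above: libc_private.h and namespace.h are internal headers that only exist when this file is built as part of FreeBSD libc, so MOZ_MEMORY builds skip them. The include of spinlock.h is wrapped in visibility push/pop so that its declarations of libc's _spinlock()/_spinunlock() keep default visibility even when Firefox is compiled with -fvisibility=hidden.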
@@ -428,17 +434,22 @@ void *_mmap(void *addr, size_t length, i
 #endif
 #endif
 }
 #define mmap _mmap
 #define munmap(a, l) syscall(SYS_munmap, a, l)
 #endif
 #endif
 
+#ifdef __FreeBSD__
+#define isthreaded __isthreaded
+extern int __isthreaded;
+#else
 static const bool isthreaded = true;
+#endif
 #ifdef MOZ_MEMORY_DARWIN
 static pthread_key_t tlsIndex;
 #endif
 
 #if defined(MOZ_MEMORY_SOLARIS) && defined(MAP_ALIGN) && !defined(JEMALLOC_NEVER_USES_MAP_ALIGN)
 #define JEMALLOC_USES_MAP_ALIGN	 /* Required on Solaris 10. Might improve performance elsewhere. */
 #endif
 
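The isthreaded mapping above swaps mozjemalloc's compile-time constant for FreeBSD libc's __isthreaded, a global that stays zero until libthr creates the first additional thread, so the lock guards below become no-ops in single-threaded processes. A minimal sketch of the pattern, assuming only that __isthreaded resolves from FreeBSD libc:

#include <pthread.h>

extern int __isthreaded;	/* FreeBSD libc: nonzero once threads exist */

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static void
with_lock(void (*fn)(void))
{
	/* Locking is skipped entirely while the process is single-threaded. */
	if (__isthreaded)
		pthread_mutex_lock(&m);
	fn();
	if (__isthreaded)
		pthread_mutex_unlock(&m);
}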
@@ -646,37 +657,37 @@ static pthread_key_t tlsIndex;
 #define malloc_spinlock_t CRITICAL_SECTION
 #elif defined(MOZ_MEMORY_DARWIN)
 typedef struct {
 	OSSpinLock	lock;
 } malloc_mutex_t;
 typedef struct {
 	OSSpinLock	lock;
 } malloc_spinlock_t;
-#elif defined(MOZ_MEMORY)
+#elif defined(MOZ_MEMORY) && !defined(__FreeBSD__)
 typedef pthread_mutex_t malloc_mutex_t;
 typedef pthread_mutex_t malloc_spinlock_t;
 #else
 /* XXX these should #ifdef these for freebsd (and linux?) only */
 typedef struct {
 	spinlock_t	lock;
 } malloc_mutex_t;
-typedef malloc_spinlock_t malloc_mutex_t;
+typedef pthread_mutex_t malloc_spinlock_t;
 #endif
 
 /* Set to true once the allocator has been initialized. */
 static bool malloc_initialized = false;
 
 #if defined(MOZ_MEMORY_WINDOWS)
 /* No init lock for Windows. */
 #elif defined(MOZ_MEMORY_DARWIN)
 static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
 #elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
 static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
-#elif defined(MOZ_MEMORY)
+#elif defined(MOZ_MEMORY) && !defined(__FreeBSD__)
 static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
 #else
 static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
 #endif
 
 /******************************************************************************/
 /*
  * Statistics data structures.
@@ -981,17 +992,17 @@ struct arena_bin_s {
 
 struct arena_s {
 #if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
 	uint32_t		magic;
 #  define ARENA_MAGIC 0x947d3d24
 #endif
 
 	/* All operations on this arena require that lock be locked. */
-#ifdef MOZ_MEMORY
+#ifndef __FreeBSD__
 	malloc_spinlock_t	lock;
 #else
 	pthread_mutex_t		lock;
 #endif
 
 #ifdef MALLOC_STATS
 	arena_stats_t		stats;
 #endif
@@ -1278,17 +1289,17 @@ static arena_t		**arenas;
 static unsigned		narenas;
 #ifndef NO_TLS
 #  ifdef MALLOC_BALANCE
 static unsigned		narenas_2pow;
 #  else
 static unsigned		next_arena;
 #  endif
 #endif
-#ifdef MOZ_MEMORY
+#ifndef __FreeBSD__
 static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
 #else
 static pthread_mutex_t arenas_lock; /* Protects arenas initialization. */
 #endif
 
 #ifndef NO_TLS
 /*
  * Map of pthread_self() --> arenas[???], used for selecting an arena to use
@@ -1568,16 +1579,25 @@ int pthread_atfork(void (*)(void), void 
 	if (!(assertion)) {			\
 		MOZ_CRASH_UNSAFE_OOL(#assertion);	\
 	}					\
 } while (0)
 #else
 #  define RELEASE_ASSERT(assertion) assert(assertion)
 #endif
 
+#ifdef __FreeBSD__
+/*
+ * We use an unpublished interface to initialize pthread mutexes with an
+ * allocation callback, in order to avoid infinite recursion.
+ */
+MOZ_EXPORT int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+    void *(calloc_cb)(size_t, size_t));
+#endif
+
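_pthread_mutex_init_calloc_cb() is exported by FreeBSD's libthr for exactly this situation: it initializes a mutex like pthread_mutex_init(), but routes the internal structure allocation through the supplied calloc-compatible callback. A hedged sketch of the idea, where bootstrap_calloc is a hypothetical stand-in for a reserve allocator such as the base_calloc used later in this patch:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* Unpublished libthr interface, matching the declaration added above. */
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
	    void *(calloc_cb)(size_t, size_t));

/*
 * Hypothetical reserve allocator: hands out zeroed memory from a static
 * pool so it can never recurse into malloc.  Alignment and overflow
 * handling are omitted for brevity.
 */
static void *
bootstrap_calloc(size_t number, size_t size)
{
	static char pool[4096];
	static size_t used;
	size_t bytes = number * size;

	if (bytes > sizeof(pool) - used)
		return (NULL);
	used += bytes;
	return (pool + used - bytes);
}

static pthread_mutex_t arena_lock;

static bool
arena_lock_init(void)
{
	/* Mutex storage comes from the pool, not the public calloc(). */
	return (_pthread_mutex_init_calloc_cb(&arena_lock, bootstrap_calloc) != 0);
}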
 /******************************************************************************/
 /*
  * Begin mutex.  We can't use normal pthread mutexes in all places, because
  * they require malloc()ed memory, which causes bootstrapping issues in some
  * cases.
  */
 
 static bool
@@ -1594,17 +1614,17 @@ malloc_mutex_init(malloc_mutex_t *mutex)
 	if (pthread_mutexattr_init(&attr) != 0)
 		return (true);
 	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 	if (pthread_mutex_init(mutex, &attr) != 0) {
 		pthread_mutexattr_destroy(&attr);
 		return (true);
 	}
 	pthread_mutexattr_destroy(&attr);
-#elif defined(MOZ_MEMORY)
+#elif defined(MOZ_MEMORY) && !defined(__FreeBSD__)
 	if (pthread_mutex_init(mutex, NULL) != 0)
 		return (true);
 #else
 	static const spinlock_t lock = _SPINLOCK_INITIALIZER;
 
 	mutex->lock = lock;
 #endif
 	return (false);
@@ -1613,33 +1633,33 @@ malloc_mutex_init(malloc_mutex_t *mutex)
 static inline void
 malloc_mutex_lock(malloc_mutex_t *mutex)
 {
 
 #if defined(MOZ_MEMORY_WINDOWS)
 	EnterCriticalSection(mutex);
 #elif defined(MOZ_MEMORY_DARWIN)
 	OSSpinLockLock(&mutex->lock);
-#elif defined(MOZ_MEMORY)
+#elif defined(MOZ_MEMORY) && !defined(__FreeBSD__)
 	pthread_mutex_lock(mutex);
 #else
 	if (isthreaded)
 		_SPINLOCK(&mutex->lock);
 #endif
 }
 
 static inline void
 malloc_mutex_unlock(malloc_mutex_t *mutex)
 {
 
 #if defined(MOZ_MEMORY_WINDOWS)
 	LeaveCriticalSection(mutex);
 #elif defined(MOZ_MEMORY_DARWIN)
 	OSSpinLockUnlock(&mutex->lock);
-#elif defined(MOZ_MEMORY)
+#elif defined(MOZ_MEMORY) && !defined(__FreeBSD__)
 	pthread_mutex_unlock(mutex);
 #else
 	if (isthreaded)
 		_SPINUNLOCK(&mutex->lock);
 #endif
 }
 
 #if (defined(__GNUC__))
@@ -1659,16 +1679,19 @@ malloc_spin_init(malloc_spinlock_t *lock
 	if (pthread_mutexattr_init(&attr) != 0)
 		return (true);
 	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 	if (pthread_mutex_init(lock, &attr) != 0) {
 		pthread_mutexattr_destroy(&attr);
 		return (true);
 	}
 	pthread_mutexattr_destroy(&attr);
+#elif defined(__FreeBSD__)
+	if (_pthread_mutex_init_calloc_cb(lock, base_calloc) != 0)
+		return (true);
 #elif defined(MOZ_MEMORY)
 	if (pthread_mutex_init(lock, NULL) != 0)
 		return (true);
 #else
 	lock->lock = _SPINLOCK_INITIALIZER;
 #endif
 	return (false);
 }
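The callback passed above, base_calloc, is mozjemalloc's bootstrap allocator: it hands out zeroed memory from the internal "base" region, whose own lock is one of the spinlock-backed malloc_mutex_t locks, so initializing an arena mutex can no longer recurse into pthread-backed locking.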
@@ -1676,31 +1699,55 @@ malloc_spin_init(malloc_spinlock_t *lock
 static inline void
 malloc_spin_lock(malloc_spinlock_t *lock)
 {
 
 #if defined(MOZ_MEMORY_WINDOWS)
 	EnterCriticalSection(lock);
 #elif defined(MOZ_MEMORY_DARWIN)
 	OSSpinLockLock(&lock->lock);
+#elif defined(__FreeBSD__)
+	if (isthreaded) {
+		if (pthread_mutex_trylock(lock) != 0) {
+			unsigned i, j;
+
+			/* Exponentially back off. */
+			for (i = 1; i <= SPIN_LIMIT_2POW; i++) {
+				for (j = 0; j < (1U << i); j++)
+					CPU_SPINWAIT;
+				if (pthread_mutex_trylock(lock) == 0)
+					return;
+			}
+
+			/*
+			 * Spinning failed.  Block until the lock becomes
+			 * available, in order to avoid indefinite priority
+			 * inversion.
+			 */
+			pthread_mutex_lock(lock);
+		}
+	}
 #elif defined(MOZ_MEMORY)
 	pthread_mutex_lock(lock);
 #else
 	if (isthreaded)
 		_SPINLOCK(&lock->lock);
 #endif
 }
 
 static inline void
 malloc_spin_unlock(malloc_spinlock_t *lock)
 {
 #if defined(MOZ_MEMORY_WINDOWS)
 	LeaveCriticalSection(lock);
 #elif defined(MOZ_MEMORY_DARWIN)
 	OSSpinLockUnlock(&lock->lock);
+#elif defined(__FreeBSD__)
+	if (isthreaded)
+		pthread_mutex_unlock(lock);
 #elif defined(MOZ_MEMORY)
 	pthread_mutex_unlock(lock);
 #else
 	if (isthreaded)
 		_SPINUNLOCK(&lock->lock);
 #endif
 }
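The FreeBSD branches above reproduce classic jemalloc adaptive locking: try the mutex, spin with exponentially growing runs of CPU_SPINWAIT (2^1 through 2^SPIN_LIMIT_2POW pauses) between retries, and finally fall back to a blocking pthread_mutex_lock() so a preempted lock holder cannot cause unbounded priority inversion.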
 
@@ -1709,17 +1756,17 @@ malloc_spin_unlock(malloc_spinlo
  */
 /******************************************************************************/
 /*
  * Begin spin lock.  Spin locks here are actually adaptive mutexes that block
  * after a period of spinning, because unbounded spinning would allow for
  * priority inversion.
  */
 
-#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_DARWIN)
+#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_DARWIN) && !defined(__FreeBSD__)
 #  define	malloc_spin_init	malloc_mutex_init
 #  define	malloc_spin_lock	malloc_mutex_lock
 #  define	malloc_spin_unlock	malloc_mutex_unlock
 #endif
 
 #ifndef MOZ_MEMORY
 /*
  * We use an unpublished interface to initialize pthread mutexes with an
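Excluding __FreeBSD__ from this aliasing keeps the real malloc_spin_* implementations above in use on FreeBSD; on the other MOZ_MEMORY platforms (Darwin aside) spin locks continue to collapse into the plain mutex wrappers as before.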
new file mode 100644
--- /dev/null
+++ b/memory/mozjemalloc/spinlock.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: head/lib/libc/include/spinlock.h 165968 2007-01-12 07:31:30Z imp $
+ *
+ * Lock definitions used in both libc and libpthread.
+ *
+ */
+
+#ifndef _SPINLOCK_H_
+#define _SPINLOCK_H_
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+/*
+ * Lock structure with room for debugging information.
+ */
+struct _spinlock {
+	volatile long	access_lock;
+	volatile long	lock_owner;
+	volatile char	*fname;
+	volatile int	lineno;
+};
+typedef struct _spinlock spinlock_t;
+
+#define	_SPINLOCK_INITIALIZER	{ 0, 0, 0, 0 }
+
+#define _SPINUNLOCK(_lck)	_spinunlock(_lck);
+#ifdef	_LOCK_DEBUG
+#define	_SPINLOCK(_lck)		_spinlock_debug(_lck, __FILE__, __LINE__)
+#else
+#define	_SPINLOCK(_lck)		_spinlock(_lck)
+#endif
+
+/*
+ * Thread function prototype definitions:
+ */
+__BEGIN_DECLS
+long	_atomic_lock(volatile long *);
+void	_spinlock(spinlock_t *);
+void	_spinunlock(spinlock_t *);
+void	_spinlock_debug(spinlock_t *, char *, int);
+__END_DECLS
+
+#endif /* _SPINLOCK_H_ */
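The new file is a copy of FreeBSD libc's private spinlock interface (lib/libc/include/spinlock.h): _SPINLOCK() and _SPINUNLOCK() wrap the libc-internal _spinlock()/_spinunlock(), which require no heap allocation, and that is what makes spinlock_t suitable for locks that must work before malloc does. A minimal usage sketch, assuming the symbols resolve from FreeBSD libc at link time:

#include "spinlock.h"

/* Statically initialized: usable before any allocator state exists. */
static spinlock_t bootstrap_lock = _SPINLOCK_INITIALIZER;

static void
critical_section(void)
{
	_SPINLOCK(&bootstrap_lock);	/* spins via libc's _spinlock() */
	/* ... bootstrap-safe work ... */
	_SPINUNLOCK(&bootstrap_lock);
}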