Bug 1423000 - Move mozjemalloc mutexes to a separate header. r?njn draft
author Mike Hommey <mh+mozilla@glandium.org>
Sun, 03 Dec 2017 14:21:19 +0900
changeset 707240 a2316ce065bca594af8c528bbb24f5909ce46029
parent 707239 911c0c338a4ba8317a177cf6eb18527357a7fd05
child 707241 1fc376243668bec806794257d9d50202d302b6c3
push id 92052
push user bmo:mh+mozilla@glandium.org
push date Mon, 04 Dec 2017 23:34:17 +0000
reviewers njn
bugs 1423000
milestone 59.0a1
Bug 1423000 - Move mozjemalloc mutexes to a separate header. r?njn
Also change the definition of the StaticMutex constructor to unconfuse clang-format.
memory/build/Mutex.h
memory/build/mozjemalloc.cpp
new file mode 100644
--- /dev/null
+++ b/memory/build/Mutex.h
@@ -0,0 +1,141 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Mutex_h
+#define Mutex_h
+
+#if defined(XP_WIN)
+#include <windows.h>
+#elif defined(XP_DARWIN)
+#include <libkern/OSAtomic.h>
+#else
+#include <pthread.h>
+#endif
+#include "mozilla/GuardObjects.h"
+
+// Mutexes based on spinlocks.  We can't use normal pthread spinlocks in all
+// places, because they require malloc()ed memory, which causes bootstrapping
+// issues in some cases.  We also can't use constructors, because for statics,
+// they would fire after the first use of malloc, resetting the locks.
+struct Mutex
+{
+#if defined(XP_WIN)
+  CRITICAL_SECTION mMutex;
+#elif defined(XP_DARWIN)
+  OSSpinLock mMutex;
+#else
+  pthread_mutex_t mMutex;
+#endif
+
+  // Initializes a mutex. Returns whether initialization succeeded.
+  inline bool Init()
+  {
+#if defined(XP_WIN)
+    if (!InitializeCriticalSectionAndSpinCount(&mMutex, 5000)) {
+      return false;
+    }
+#elif defined(XP_DARWIN)
+    mMutex = OS_SPINLOCK_INIT;
+#elif defined(XP_LINUX) && !defined(ANDROID)
+    pthread_mutexattr_t attr;
+    if (pthread_mutexattr_init(&attr) != 0) {
+      return false;
+    }
+    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+    if (pthread_mutex_init(&mMutex, &attr) != 0) {
+      pthread_mutexattr_destroy(&attr);
+      return false;
+    }
+    pthread_mutexattr_destroy(&attr);
+#else
+    if (pthread_mutex_init(&mMutex, nullptr) != 0) {
+      return false;
+    }
+#endif
+    return true;
+  }
+
+  inline void Lock()
+  {
+#if defined(XP_WIN)
+    EnterCriticalSection(&mMutex);
+#elif defined(XP_DARWIN)
+    OSSpinLockLock(&mMutex);
+#else
+    pthread_mutex_lock(&mMutex);
+#endif
+  }
+
+  inline void Unlock()
+  {
+#if defined(XP_WIN)
+    LeaveCriticalSection(&mMutex);
+#elif defined(XP_DARWIN)
+    OSSpinLockUnlock(&mMutex);
+#else
+    pthread_mutex_unlock(&mMutex);
+#endif
+  }
+};
+
+// Mutex that can be used for static initialization.
+// On Windows, CRITICAL_SECTION requires a function call to be initialized,
+// but for the initialization lock, a static initializer calling the
+// function would be called too late. We need no-function-call
+// initialization, which SRWLock provides.
+// Ideally, we'd use the same type of locks everywhere, but SRWLocks
+// everywhere incur a performance penalty. See bug 1418389.
+#if defined(XP_WIN)
+struct StaticMutex
+{
+  SRWLOCK mMutex;
+
+  constexpr StaticMutex()
+    : mMutex(SRWLOCK_INIT)
+  {
+  }
+
+  inline void Lock() { AcquireSRWLockExclusive(&mMutex); }
+
+  inline void Unlock() { ReleaseSRWLockExclusive(&mMutex); }
+};
+#else
+struct StaticMutex : public Mutex
+{
+#if defined(XP_DARWIN)
+#define STATIC_MUTEX_INIT OS_SPINLOCK_INIT
+#elif defined(XP_LINUX) && !defined(ANDROID)
+#define STATIC_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#else
+#define STATIC_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
+#endif
+  constexpr StaticMutex()
+    : Mutex{ STATIC_MUTEX_INIT }
+  {
+  }
+};
+#endif
+
+template<typename T>
+struct MOZ_RAII AutoLock
+{
+  explicit AutoLock(T& aMutex MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+    : mMutex(aMutex)
+  {
+    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+    mMutex.Lock();
+  }
+
+  ~AutoLock() { mMutex.Unlock(); }
+
+private:
+  MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
+  T& mMutex;
+};
+
+using MutexAutoLock = AutoLock<Mutex>;
+
+#endif
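
For reference (not part of the patch), a minimal usage sketch of the types now living in Mutex.h: a plain Mutex deliberately has no constructor and must be Init()'d before first use, a StaticMutex is usable from static context without any call, and AutoLock/MutexAutoLock provide RAII locking. The names gExampleLock, gExampleStaticLock, ExampleInit and ExampleCriticalWork below are hypothetical, not identifiers from the tree.

#include "Mutex.h"

// Hypothetical example: a Mutex has no constructor, so it must be Init()'d
// before first use (e.g. during allocator initialization).
static Mutex gExampleLock;

// A StaticMutex needs no Init(): its constexpr constructor makes it safe to
// use even before any initialization code has run, which is what gInitLock
// in mozjemalloc.cpp relies on.
static StaticMutex gExampleStaticLock;

static bool
ExampleInit()
{
  return gExampleLock.Init();
}

static void
ExampleCriticalWork()
{
  // RAII locking: Unlock() runs when `lock` goes out of scope.
  MutexAutoLock lock(gExampleLock);
  // ... touch state guarded by gExampleLock ...
}

static void
ExampleStaticCriticalWork()
{
  // AutoLock is templated, so it works for StaticMutex as well.
  AutoLock<StaticMutex> lock(gExampleStaticLock);
  // ...
}
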
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -124,28 +124,28 @@
 #endif
 
 #include "mozilla/Atomics.h"
 #include "mozilla/Alignment.h"
 #include "mozilla/Assertions.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/CheckedInt.h"
 #include "mozilla/DoublyLinkedList.h"
-#include "mozilla/GuardObjects.h"
 #include "mozilla/Likely.h"
 #include "mozilla/MathAlgorithms.h"
 #include "mozilla/Sprintf.h"
 // Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
 // instead of the one defined here; use only MozTagAnonymousMemory().
 #include "mozilla/TaggedAnonymousMemory.h"
 #include "mozilla/ThreadLocal.h"
 #include "mozilla/UniquePtr.h"
 #include "mozilla/Unused.h"
 #include "mozilla/fallible.h"
 #include "rb.h"
+#include "Mutex.h"
 #include "Utils.h"
 
 using namespace mozilla;
 
 // On Linux, we use madvise(MADV_DONTNEED) to release memory back to the
 // operating system.  If we release 1MB of live pages with MADV_DONTNEED, our
 // RSS will decrease by 1MB (almost) immediately.
 //
@@ -531,92 +531,16 @@ static size_t opt_dirty_max = DIRTY_MAX_
 // MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
 #error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #endif
 
 static void*
 base_alloc(size_t aSize);
 
-// Mutexes based on spinlocks.  We can't use normal pthread spinlocks in all
-// places, because they require malloc()ed memory, which causes bootstrapping
-// issues in some cases.
-struct Mutex
-{
-#if defined(XP_WIN)
-  CRITICAL_SECTION mMutex;
-#elif defined(XP_DARWIN)
-  OSSpinLock mMutex;
-#else
-  pthread_mutex_t mMutex;
-#endif
-
-  inline bool Init();
-
-  inline void Lock();
-
-  inline void Unlock();
-};
-
-// Mutex that can be used for static initialization.
-// On Windows, CRITICAL_SECTION requires a function call to be initialized,
-// but for the initialization lock, a static initializer calling the
-// function would be called too late. We need no-function-call
-// initialization, which SRWLock provides.
-// Ideally, we'd use the same type of locks everywhere, but SRWLocks
-// everywhere incur a performance penalty. See bug 1418389.
-#if defined(XP_WIN)
-struct StaticMutex
-{
-  SRWLOCK mMutex;
-
-  constexpr StaticMutex()
-    : mMutex(SRWLOCK_INIT)
-  {
-  }
-
-  inline void Lock();
-
-  inline void Unlock();
-};
-#else
-struct StaticMutex : public Mutex
-{
-  constexpr StaticMutex()
-#if defined(XP_DARWIN)
-    : Mutex{ OS_SPINLOCK_INIT }
-#elif defined(XP_LINUX) && !defined(ANDROID)
-    : Mutex{ PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP }
-#else
-    : Mutex{ PTHREAD_MUTEX_INITIALIZER }
-#endif
-  {
-  }
-};
-#endif
-
-template<typename T>
-struct MOZ_RAII AutoLock
-{
-  explicit AutoLock(T& aMutex MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
-    : mMutex(aMutex)
-  {
-    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
-    mMutex.Lock();
-  }
-
-  ~AutoLock() { mMutex.Unlock(); }
-
-private:
-  MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
-  T& mMutex;
-};
-
-using MutexAutoLock = AutoLock<Mutex>;
-
 // Set to true once the allocator has been initialized.
 static Atomic<bool> malloc_initialized(false);
 
 static StaticMutex gInitLock;
 
 // ***************************************************************************
 // Statistics data structures.
 
@@ -1358,90 +1282,16 @@ static void
 
 #ifdef ANDROID
 // Android's pthread.h does not declare pthread_atfork() until SDK 21.
 extern "C" MOZ_EXPORT int
 pthread_atfork(void (*)(void), void (*)(void), void (*)(void));
 #endif
 
 // ***************************************************************************
-// Begin mutex.  We can't use normal pthread mutexes in all places, because
-// they require malloc()ed memory, which causes bootstrapping issues in some
-// cases. We also can't use constructors, because for statics, they would fire
-// after the first use of malloc, resetting the locks.
-
-// Initializes a mutex. Returns whether initialization succeeded.
-bool
-Mutex::Init()
-{
-#if defined(XP_WIN)
-  if (!InitializeCriticalSectionAndSpinCount(&mMutex, 5000)) {
-    return false;
-  }
-#elif defined(XP_DARWIN)
-  mMutex = OS_SPINLOCK_INIT;
-#elif defined(XP_LINUX) && !defined(ANDROID)
-  pthread_mutexattr_t attr;
-  if (pthread_mutexattr_init(&attr) != 0) {
-    return false;
-  }
-  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
-  if (pthread_mutex_init(&mMutex, &attr) != 0) {
-    pthread_mutexattr_destroy(&attr);
-    return false;
-  }
-  pthread_mutexattr_destroy(&attr);
-#else
-  if (pthread_mutex_init(&mMutex, nullptr) != 0) {
-    return false;
-  }
-#endif
-  return true;
-}
-
-void
-Mutex::Lock()
-{
-#if defined(XP_WIN)
-  EnterCriticalSection(&mMutex);
-#elif defined(XP_DARWIN)
-  OSSpinLockLock(&mMutex);
-#else
-  pthread_mutex_lock(&mMutex);
-#endif
-}
-
-void
-Mutex::Unlock()
-{
-#if defined(XP_WIN)
-  LeaveCriticalSection(&mMutex);
-#elif defined(XP_DARWIN)
-  OSSpinLockUnlock(&mMutex);
-#else
-  pthread_mutex_unlock(&mMutex);
-#endif
-}
-
-#if defined(XP_WIN)
-void
-StaticMutex::Lock()
-{
-  AcquireSRWLockExclusive(&mMutex);
-}
-
-void
-StaticMutex::Unlock()
-{
-  ReleaseSRWLockExclusive(&mMutex);
-}
-#endif
-
-// End mutex.
-// ***************************************************************************
 // Begin Utility functions/macros.
 
 // Return the chunk address for allocation address a.
 static inline arena_chunk_t*
 GetChunkForPtr(const void* aPtr)
 {
   return (arena_chunk_t*)(uintptr_t(aPtr) & ~kChunkSizeMask);
 }
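
As a side note on the trailing context above: GetChunkForPtr() maps any allocation address to the chunk containing it by clearing the low bits of the pointer. A small self-contained illustration follows; the 1 MiB chunk size (mask 0xFFFFF) is an assumption for the example, not something taken from this patch.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int
main()
{
  // Illustrative only: mozjemalloc's kChunkSizeMask is chunk_size - 1;
  // assume 1 MiB chunks and a 64-bit build here.
  const uintptr_t kExampleChunkMask = (uintptr_t(1) << 20) - 1; // 0xFFFFF
  uintptr_t ptr = uintptr_t(0x7f3a12345678);   // some address inside a chunk
  uintptr_t chunk = ptr & ~kExampleChunkMask;  // 0x7f3a12300000: chunk start
  printf("chunk base: 0x%" PRIxPTR "\n", chunk);
  return 0;
}
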