Bug 1423000 - Re-run clang-format on memory/build. r?njn (draft)
author Mike Hommey <mh+mozilla@glandium.org>
date Sun, 03 Dec 2017 14:22:05 +0900
changeset 707244 48e567028a3c48d18bc95e20273c603629d24980
parent 707243 9e35794b597dfeadd45081e0d107ed68eb5d658b
child 707245 ca4cd82926443b194bbbe403154349cf2f7813ce
push id 92052
push user bmo:mh+mozilla@glandium.org
push date Mon, 04 Dec 2017 23:34:17 +0000
reviewers njn
bugs 1423000
milestone 59.0a1
Bug 1423000 - Re-run clang-format on memory/build. r?njn

Most adjustments come from some recent .clang-format changes. A few were overlooked in earlier changes to the code.
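For context: the comment rewrapping in the hunks below is what clang-format produces when comment reflow is enabled together with an 80-column limit. The exact .clang-format changes are not part of this patch, so the excerpt here is only a hedged sketch; ColumnLimit and ReflowComments are real clang-format options, but the values shown are assumptions for illustration, not necessarily the actual mozilla-central configuration.

  # Hypothetical .clang-format excerpt (assumed values, illustration only)
  ColumnLimit: 80        # wrap code and comments at 80 columns
  ReflowComments: true   # re-break over-long comment text to fit ColumnLimit

With settings like these, clang-format re-breaks any comment line that exceeds the column limit, which is why several comments below are rewrapped without any change in wording.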
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -1010,18 +1010,18 @@ private:
   bool RallocGrowLarge(arena_chunk_t* aChunk,
                        void* aPtr,
                        size_t aSize,
                        size_t aOldSize);
 
   void* RallocSmallOrLarge(void* aPtr, size_t aSize, size_t aOldSize);
 
   void* RallocHuge(void* aPtr, size_t aSize, size_t aOldSize);
+
 public:
-
   inline void* Malloc(size_t aSize, bool aZero);
 
   void* Palloc(size_t aAlignment, size_t aSize);
 
   inline void DallocSmall(arena_chunk_t* aChunk,
                           void* aPtr,
                           arena_chunk_map_t* aMapElm);
 
@@ -1182,20 +1182,21 @@ static void* base_past_addr; // Addr imm
 static extent_node_t* base_nodes;
 static Mutex base_mtx;
 static size_t base_mapped;
 static size_t base_committed;
 
 // ******
 // Arenas.
 
-// The arena associated with the current thread (per jemalloc_thread_local_arena)
-// On OSX, __thread/thread_local circles back calling malloc to allocate storage
-// on first access on each thread, which leads to an infinite loop, but
-// pthread-based TLS somehow doesn't have this problem.
+// The arena associated with the current thread (per
+// jemalloc_thread_local_arena). On OSX, __thread/thread_local circles back
+// calling malloc to allocate storage on first access on each thread, which
+// leads to an infinite loop, but pthread-based TLS somehow doesn't have this
+// problem.
 #if !defined(XP_DARWIN)
 static MOZ_THREAD_LOCAL(arena_t*) thread_arena;
 #else
 static detail::ThreadLocal<arena_t*, detail::ThreadLocalKeyStorage>
   thread_arena;
 #endif
 
 // *****************************
@@ -1306,17 +1307,18 @@ GetChunkOffsetForPtr(const void* aPtr)
 static inline const char*
 _getprogname(void)
 {
 
   return "<jemalloc>";
 }
 
 // Fill the given range of memory with zeroes or junk depending on opt_junk and
-// opt_zero. Callers can force filling with zeroes through the aForceZero argument.
+// opt_zero. Callers can force filling with zeroes through the aForceZero
+// argument.
 static inline void
 ApplyZeroOrJunk(void* aPtr, size_t aSize)
 {
   if (opt_junk) {
     memset(aPtr, kAllocJunk, aSize);
   } else if (opt_zero) {
     memset(aPtr, 0, aSize);
   }
@@ -1530,25 +1532,25 @@ pages_unmap(void* aAddr, size_t aSize)
 }
 
 static void*
 pages_map(void* aAddr, size_t aSize)
 {
   void* ret;
 #if defined(__ia64__) ||                                                       \
   (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
-  // The JS engine assumes that all allocated pointers have their high 17 bits clear,
-  // which ia64's mmap doesn't support directly. However, we can emulate it by passing
-  // mmap an "addr" parameter with those bits clear. The mmap will return that address,
-  // or the nearest available memory above that address, providing a near-guarantee
-  // that those bits are clear. If they are not, we return nullptr below to indicate
-  // out-of-memory.
+  // The JS engine assumes that all allocated pointers have their high 17 bits
+  // clear, which ia64's mmap doesn't support directly. However, we can emulate
+  // it by passing mmap an "addr" parameter with those bits clear. The mmap will
+  // return that address, or the nearest available memory above that address,
+  // providing a near-guarantee that those bits are clear. If they are not, we
+  // return nullptr below to indicate out-of-memory.
   //
-  // The addr is chosen as 0x0000070000000000, which still allows about 120TB of virtual
-  // address space.
+  // The addr is chosen as 0x0000070000000000, which still allows about 120TB of
+  // virtual address space.
   //
   // See Bug 589735 for more information.
   bool check_placement = true;
   if (!aAddr) {
     aAddr = (void*)0x0000070000000000;
     check_placement = false;
   }
 #endif
@@ -1591,17 +1593,18 @@ pages_map(void* aAddr, size_t aSize)
 #if defined(__ia64__) ||                                                       \
   (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
   // If the allocated memory doesn't have its upper 17 bits clear, consider it
   // as out of memory.
   else if ((long long)ret & 0xffff800000000000) {
     munmap(ret, aSize);
     ret = nullptr;
   }
-  // If the caller requested a specific memory location, verify that's what mmap returned.
+  // If the caller requested a specific memory location, verify that's what mmap
+  // returned.
   else if (check_placement && ret != aAddr) {
 #else
   else if (aAddr && ret != aAddr) {
 #endif
     // We succeeded in mapping memory, but not in the right place.
     pages_unmap(ret, aSize);
     ret = nullptr;
   }
@@ -2480,18 +2483,18 @@ arena_t::DeallocChunk(arena_chunk_t* aCh
     }
 #endif
 
     chunk_dealloc((void*)mSpare, kChunkSize, ARENA_CHUNK);
     mStats.mapped -= kChunkSize;
     mStats.committed -= gChunkHeaderNumPages;
   }
 
-  // Remove run from the tree of available runs, so that the arena does not use it.
-  // Dirty page flushing only uses the tree of dirty chunks, so leaving this
+  // Remove run from the tree of available runs, so that the arena does not use
+  // it. Dirty page flushing only uses the tree of dirty chunks, so leaving this
   // chunk in the chunks_* trees is sufficient for that purpose.
   mRunsAvail.Remove(&aChunk->map[gChunkHeaderNumPages]);
 
   mSpare = aChunk;
 }
 
 arena_run_t*
 arena_t::AllocRun(size_t aSize, bool aLarge, bool aZero)
@@ -3576,18 +3579,17 @@ arena_t::RallocSmallOrLarge(void* aPtr, 
 {
   void* ret;
   size_t copysize;
   SizeClass sizeClass(aSize);
 
   // Try to avoid moving the allocation.
   if (aOldSize <= gMaxLargeClass && sizeClass.Size() == aOldSize) {
     if (aSize < aOldSize) {
-      memset(
-        (void*)(uintptr_t(aPtr) + aSize), kAllocPoison, aOldSize - aSize);
+      memset((void*)(uintptr_t(aPtr) + aSize), kAllocPoison, aOldSize - aSize);
     }
     return aPtr;
   }
   if (sizeClass.Type() == SizeClass::Large && aOldSize > gMaxBinClass &&
       aOldSize <= gMaxLargeClass) {
     arena_chunk_t* chunk = GetChunkForPtr(aPtr);
     if (sizeClass.Size() < aOldSize) {
       // Fill before shrinking in order to avoid a race.