author      Mike Hommey <mh+mozilla@glandium.org>
date        Fri, 15 Sep 2017 20:50:42 +0900
changeset   667421 683ed79ba476c1465f787e010df5128758db3ebd
parent      667420 2a0f6ab0526c867c10f55a2b8b1bbcb844632628
child       732369 31fd49b5097dab8040ebaf25757a020066b2d7d5
push id     80694
push user   bmo:mh+mozilla@glandium.org
push date   Wed, 20 Sep 2017 02:07:21 +0000
reviewers   njn
bugs        1401099
milestone   57.0a1
Bug 1401099 - Move arena_ralloc_large_grow to a method of arena_t. r?njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -779,20 +779,18 @@ private:
   void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
 
   void DeallocChunk(arena_chunk_t* aChunk);
 
   arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);
 
   void DallocRun(arena_run_t* aRun, bool aDirty);
 
-public:
   void SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero);
 
-private:
   void TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize);
 
   void TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize, bool dirty);
 
   inline void* MallocBinEasy(arena_bin_t* aBin, arena_run_t* aRun);
 
   void* MallocBinHard(arena_bin_t* aBin);
 
@@ -808,16 +806,18 @@ public:
   void* Palloc(size_t aAlignment, size_t aSize, size_t aAllocSize);
 
   inline void DallocSmall(arena_chunk_t* aChunk, void* aPtr, arena_chunk_map_t *aMapElm);
 
   void DallocLarge(arena_chunk_t* aChunk, void* aPtr);
 
   void RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize, size_t aOldSize);
 
+  bool RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize, size_t aOldSize);
+
   void Purge(bool aAll);
 
   void HardPurge();
 };
 
 /******************************************************************************/
 /*
  * Data.
@@ -3875,52 +3875,52 @@ arena_t::RallocShrinkLarge(arena_chunk_t
    * allocations.
    */
   malloc_spin_lock(&mLock);
   TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
   mStats.allocated_large -= aOldSize - aSize;
   malloc_spin_unlock(&mLock);
 }
 
-static bool
-arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t size, size_t oldsize)
+bool
+arena_t::RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
+                         size_t aOldSize)
 {
-	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow;
-	size_t npages = oldsize >> pagesize_2pow;
-
-	malloc_spin_lock(&arena->mLock);
-	MOZ_DIAGNOSTIC_ASSERT(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
-
-	/* Try to extend the run. */
-	MOZ_ASSERT(size > oldsize);
-	if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits
-	    & CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits &
-	    ~pagesize_mask) >= size - oldsize) {
-		/*
-		 * The next run is available and sufficiently large.  Split the
-		 * following run, then merge the first part with the existing
-		 * allocation.
-		 */
-		arena->SplitRun((arena_run_t *)(uintptr_t(chunk) +
-		    ((pageind+npages) << pagesize_2pow)), size - oldsize, true,
-		    false);
-
-		chunk->map[pageind].bits = size | CHUNK_MAP_LARGE |
-		    CHUNK_MAP_ALLOCATED;
-		chunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
-		    CHUNK_MAP_ALLOCATED;
-
-		arena->mStats.allocated_large += size - oldsize;
-		malloc_spin_unlock(&arena->mLock);
-		return (false);
-	}
-	malloc_spin_unlock(&arena->mLock);
-
-	return (true);
+  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
+  size_t npages = aOldSize >> pagesize_2pow;
+
+  malloc_spin_lock(&mLock);
+  MOZ_DIAGNOSTIC_ASSERT(aOldSize == (aChunk->map[pageind].bits & ~pagesize_mask));
+
+  /* Try to extend the run. */
+  MOZ_ASSERT(aSize > aOldSize);
+  if (pageind + npages < chunk_npages && (aChunk->map[pageind+npages].bits
+      & CHUNK_MAP_ALLOCATED) == 0 && (aChunk->map[pageind+npages].bits &
+      ~pagesize_mask) >= aSize - aOldSize) {
+    /*
+     * The next run is available and sufficiently large.  Split the
+     * following run, then merge the first part with the existing
+     * allocation.
+     */
+    SplitRun((arena_run_t *)(uintptr_t(aChunk) +
+        ((pageind+npages) << pagesize_2pow)), aSize - aOldSize, true,
+        false);
+
+    aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE |
+        CHUNK_MAP_ALLOCATED;
+    aChunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
+        CHUNK_MAP_ALLOCATED;
+
+    mStats.allocated_large += aSize - aOldSize;
+    malloc_spin_unlock(&mLock);
+    return false;
+  }
+  malloc_spin_unlock(&mLock);
+
+  return true;
 }
 
 /*
  * Try to resize a large allocation, in order to avoid copying.  This will
  * always fail when growing an object if the following run is already in use.
  */
 static bool
 arena_ralloc_large(void *ptr, size_t size, size_t oldsize)
@@ -3945,18 +3945,17 @@ arena_ralloc_large(void *ptr, size_t siz
 
 		if (psize < oldsize) {
 			/* Fill before shrinking in order to avoid a race. */
 			memset((void *)((uintptr_t)ptr + size), kAllocPoison,
 			    oldsize - size);
 			arena->RallocShrinkLarge(chunk, ptr, psize, oldsize);
 			return (false);
 		} else {
-			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
-			    psize, oldsize);
+			bool ret = arena->RallocGrowLarge(chunk, ptr, psize, oldsize);
 			if (ret == false && opt_zero) {
 				memset((void *)((uintptr_t)ptr + oldsize), 0,
 				    size - oldsize);
 			}
 			return (ret);
 		}
 	}
 }