Bug 1401099 - Move arena_ralloc_large_shrink to a method of arena_t. r?njn draft
author: Mike Hommey <mh+mozilla@glandium.org>
date: Fri, 15 Sep 2017 20:44:34 +0900
changeset 667420 2a0f6ab0526c867c10f55a2b8b1bbcb844632628
parent 667419 836553a9d0c4b97b37ec32f5d17a6603b20855c0
child 667421 683ed79ba476c1465f787e010df5128758db3ebd
push id: 80694
push user: bmo:mh+mozilla@glandium.org
push date: Wed, 20 Sep 2017 02:07:21 +0000
reviewers: njn
bugs: 1401099
milestone: 57.0a1
Bug 1401099 - Move arena_ralloc_large_shrink to a method of arena_t. r?njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -785,20 +785,18 @@ private:
   void DallocRun(arena_run_t* aRun, bool aDirty);
 
 public:
   void SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero);
 
 private:
   void TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize);
 
-public:
   void TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize, bool dirty);
 
-private:
   inline void* MallocBinEasy(arena_bin_t* aBin, arena_run_t* aRun);
 
   void* MallocBinHard(arena_bin_t* aBin);
 
   arena_run_t* GetNonFullBinRun(arena_bin_t* aBin);
 
   inline void* MallocSmall(size_t aSize, bool aZero);
 
@@ -808,16 +806,18 @@ public:
   inline void* Malloc(size_t aSize, bool aZero);
 
   void* Palloc(size_t aAlignment, size_t aSize, size_t aAllocSize);
 
   inline void DallocSmall(arena_chunk_t* aChunk, void* aPtr, arena_chunk_map_t *aMapElm);
 
   void DallocLarge(arena_chunk_t* aChunk, void* aPtr);
 
+  void RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize, size_t aOldSize);
+
   void Purge(bool aAll);
 
   void HardPurge();
 };
 
 /******************************************************************************/
 /*
  * Data.
@@ -3859,31 +3859,30 @@ idalloc(void *ptr)
 
 	offset = CHUNK_ADDR2OFFSET(ptr);
 	if (offset != 0)
 		arena_dalloc(ptr, offset);
 	else
 		huge_dalloc(ptr);
 }
 
-static void
-arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t size, size_t oldsize)
+void
+arena_t::RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
+                           size_t aOldSize)
 {
-
-	MOZ_ASSERT(size < oldsize);
-
-	/*
-	 * Shrink the run, and make trailing pages available for other
-	 * allocations.
-	 */
-	malloc_spin_lock(&arena->mLock);
-	arena->TrimRunTail(chunk, (arena_run_t *)ptr, oldsize, size, true);
-	arena->mStats.allocated_large -= oldsize - size;
-	malloc_spin_unlock(&arena->mLock);
+  MOZ_ASSERT(aSize < aOldSize);
+
+  /*
+   * Shrink the run, and make trailing pages available for other
+   * allocations.
+   */
+  malloc_spin_lock(&mLock);
+  TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
+  mStats.allocated_large -= aOldSize - aSize;
+  malloc_spin_unlock(&mLock);
 }
 
 static bool
 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t size, size_t oldsize)
 {
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow;
 	size_t npages = oldsize >> pagesize_2pow;
@@ -3943,18 +3942,17 @@ arena_ralloc_large(void *ptr, size_t siz
 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 		arena = chunk->arena;
 		MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
 
 		if (psize < oldsize) {
 			/* Fill before shrinking in order avoid a race. */
 			memset((void *)((uintptr_t)ptr + size), kAllocPoison,
 			    oldsize - size);
-			arena_ralloc_large_shrink(arena, chunk, ptr, psize,
-			    oldsize);
+			arena->RallocShrinkLarge(chunk, ptr, psize, oldsize);
 			return (false);
 		} else {
 			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
 			    psize, oldsize);
 			if (ret == false && opt_zero) {
 				memset((void *)((uintptr_t)ptr + oldsize), 0,
 				    size - oldsize);
 			}