Bug 1415454 - Remove the unused arena_bin_t* argument to arena_t::AllocRun. r?njn
author: Mike Hommey <mh+mozilla@glandium.org>
date: Fri, 03 Nov 2017 15:54:20 +0900
changeset 694761: 5365daf0cd10d62669057867636b6213c06d266e
parent 694760: 8d24f0698c058c57b03100f287c28a1ea1c4c3ef
child 694762: 6489add16ef2e96677ba96be3b8db4e508463b6f
push id: 88233
push user: bmo:mh+mozilla@glandium.org
push date: Wed, 08 Nov 2017 07:30:11 +0000
reviewers: njn
bugs: 1415454
milestone: 58.0a1
Bug 1415454 - Remove the unused arena_bin_t* argument to arena_t::AllocRun. r?njn
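
The aBin argument is never used inside AllocRun: the large-allocation paths
(MallocLarge and Palloc) already pass nullptr for it, and GetNonFullBinRun only
needs the bin's run size, which it passes separately as aSize. A minimal sketch
of the simplified signature and its three call sites after this change (taken
from the hunks below, not a complete listing):

  arena_run_t* AllocRun(size_t aSize, bool aLarge, bool aZero);

  // GetNonFullBinRun: the size of a bin run comes from the bin itself.
  run = AllocRun(aBin->mRunSize, false, false);

  // MallocLarge / Palloc: large allocations have no bin, so nothing is lost
  // by dropping the parameter.
  ret = AllocRun(aSize, true, aZero);
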
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -980,20 +980,17 @@ public:
 
   arena_t();
 
 private:
   void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
 
   void DeallocChunk(arena_chunk_t* aChunk);
 
-  arena_run_t* AllocRun(arena_bin_t* aBin,
-                        size_t aSize,
-                        bool aLarge,
-                        bool aZero);
+  arena_run_t* AllocRun(size_t aSize, bool aLarge, bool aZero);
 
   void DallocRun(arena_run_t* aRun, bool aDirty);
 
   MOZ_MUST_USE bool SplitRun(arena_run_t* aRun,
                              size_t aSize,
                              bool aLarge,
                              bool aZero);
 
@@ -2584,17 +2581,17 @@ arena_t::DeallocChunk(arena_chunk_t* aCh
   // Dirty page flushing only uses the tree of dirty chunks, so leaving this
   // chunk in the chunks_* trees is sufficient for that purpose.
   mRunsAvail.Remove(&aChunk->map[gChunkHeaderNumPages]);
 
   mSpare = aChunk;
 }
 
 arena_run_t*
-arena_t::AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero)
+arena_t::AllocRun(size_t aSize, bool aLarge, bool aZero)
 {
   arena_run_t* run;
   arena_chunk_map_t* mapelm;
   arena_chunk_map_t key;
 
   MOZ_ASSERT(aSize <= gMaxLargeClass);
   MOZ_ASSERT((aSize & gPageSizeMask) == 0);
 
@@ -2875,17 +2872,17 @@ arena_t::GetNonFullBinRun(arena_bin_t* a
     // run is guaranteed to have available space.
     aBin->mNonFullRuns.Remove(mapelm);
     run = (arena_run_t*)(mapelm->bits & ~gPageSizeMask);
     return run;
   }
   // No existing runs have any space available.
 
   // Allocate a new run.
-  run = AllocRun(aBin, aBin->mRunSize, false, false);
+  run = AllocRun(aBin->mRunSize, false, false);
   if (!run) {
     return nullptr;
   }
   // Don't initialize if a race in arena_t::RunAlloc() allowed an existing
   // run to become usable.
   if (run == aBin->mCurrentRun) {
     return run;
   }
@@ -3062,17 +3059,17 @@ arena_t::MallocLarge(size_t aSize, bool 
 {
   void* ret;
 
   // Large allocation.
   aSize = PAGE_CEILING(aSize);
 
   {
     MutexAutoLock lock(mLock);
-    ret = AllocRun(nullptr, aSize, true, aZero);
+    ret = AllocRun(aSize, true, aZero);
     if (!ret) {
       return nullptr;
     }
     mStats.allocated_large += aSize;
   }
 
   if (aZero == false) {
     if (opt_junk) {
@@ -3116,17 +3113,17 @@ arena_t::Palloc(size_t aAlignment, size_
   size_t offset;
   arena_chunk_t* chunk;
 
   MOZ_ASSERT((aSize & gPageSizeMask) == 0);
   MOZ_ASSERT((aAlignment & gPageSizeMask) == 0);
 
   {
     MutexAutoLock lock(mLock);
-    ret = AllocRun(nullptr, aAllocSize, true, false);
+    ret = AllocRun(aAllocSize, true, false);
     if (!ret) {
       return nullptr;
     }
 
     chunk = GetChunkForPtr(ret);
 
     offset = uintptr_t(ret) & (aAlignment - 1);
     MOZ_ASSERT((offset & gPageSizeMask) == 0);