Bug 1414155 - Rename chunk related constants. r?njn
author      Mike Hommey <mh+mozilla@glandium.org>
date        Fri, 03 Nov 2017 12:16:11 +0900
changeset   693256 fb94afb235b8767bb889b3125ab1ea227d657c23
parent      693255 d26a0ccd1a16652edb9fcc3a76bf429de2762682
child       693257 e71852027eee19f3e2f1c9f627037b67a48c98cb
push id     87736
push user   bmo:mh+mozilla@glandium.org
push date   Sat, 04 Nov 2017 22:15:28 +0000
reviewers   njn
bugs        1414155
milestone   58.0a1
Bug 1414155 - Rename chunk related constants. r?njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -455,18 +455,18 @@ static size_t gPageSize;
 #define GLOBAL_LOG2 FloorLog2
 #define GLOBAL_ASSERT MOZ_RELEASE_ASSERT
 #endif
 
 DECLARE_GLOBAL(size_t, gMaxSubPageClass)
 DECLARE_GLOBAL(uint8_t, nsbins)
 DECLARE_GLOBAL(uint8_t, gPageSize2Pow)
 DECLARE_GLOBAL(size_t, gPageSizeMask)
-DECLARE_GLOBAL(size_t, chunk_npages)
-DECLARE_GLOBAL(size_t, arena_chunk_header_npages)
+DECLARE_GLOBAL(size_t, gChunkNumPages)
+DECLARE_GLOBAL(size_t, gChunkHeaderNumPages)
 DECLARE_GLOBAL(size_t, gMaxLargeClass)
 
 DEFINE_GLOBALS
 // Largest sub-page size class.
 DEFINE_GLOBAL(size_t) gMaxSubPageClass = gPageSize / 2;
 
 // Max size class for bins.
 #define gMaxBinClass gMaxSubPageClass
@@ -474,29 +474,29 @@ DEFINE_GLOBAL(size_t) gMaxSubPageClass =
 // Number of (2^n)-spaced sub-page bins.
 DEFINE_GLOBAL(uint8_t)
 nsbins = GLOBAL_LOG2(gMaxSubPageClass) - LOG2(kMaxQuantumClass);
 
 DEFINE_GLOBAL(uint8_t) gPageSize2Pow = GLOBAL_LOG2(gPageSize);
 DEFINE_GLOBAL(size_t) gPageSizeMask = gPageSize - 1;
 
 // Number of pages in a chunk.
-DEFINE_GLOBAL(size_t) chunk_npages = kChunkSize >> gPageSize2Pow;
+DEFINE_GLOBAL(size_t) gChunkNumPages = kChunkSize >> gPageSize2Pow;
 
 // Number of pages necessary for a chunk header.
 DEFINE_GLOBAL(size_t)
-arena_chunk_header_npages =
-  ((sizeof(arena_chunk_t) + sizeof(arena_chunk_map_t) * (chunk_npages - 1) +
+gChunkHeaderNumPages =
+  ((sizeof(arena_chunk_t) + sizeof(arena_chunk_map_t) * (gChunkNumPages - 1) +
     gPageSizeMask) &
    ~gPageSizeMask) >>
   gPageSize2Pow;
 
 // Max size class for arenas.
 DEFINE_GLOBAL(size_t)
-gMaxLargeClass = kChunkSize - (arena_chunk_header_npages << gPageSize2Pow);
+gMaxLargeClass = kChunkSize - (gChunkHeaderNumPages << gPageSize2Pow);
 
 // Various sanity checks that regard configuration.
 GLOBAL_ASSERT(1ULL << gPageSize2Pow == gPageSize,
               "Page size is not a power of two");
 GLOBAL_ASSERT(kQuantum >= sizeof(void*));
 GLOBAL_ASSERT(kQuantum <= gPageSize);
 GLOBAL_ASSERT(kChunkSize >= gPageSize);
 GLOBAL_ASSERT(kQuantum * 4 <= kChunkSize);
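
The header-size computation above rounds the arena_chunk_t header (which already embeds one arena_chunk_map_t, hence the gChunkNumPages - 1) up to a whole number of pages. A minimal standalone sketch of the same arithmetic, using assumed values (4 KiB pages, a 1 MiB kChunkSize, and placeholder struct sizes instead of the real sizeof results):

// Standalone sketch, assuming 4 KiB pages, 1 MiB chunks, and placeholder
// sizes for arena_chunk_t / arena_chunk_map_t.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kPageSize = 4096;            // assumed; gPageSize is runtime-detected
  const size_t kPageMask = kPageSize - 1;   // plays the role of gPageSizeMask
  const size_t kChunk = 1 << 20;            // assumed kChunkSize of 1 MiB
  const size_t nPages = kChunk / kPageSize; // gChunkNumPages (256 here)
  // arena_chunk_t already embeds one map entry, hence nPages - 1 extra ones.
  const size_t headerBytes = 128 + 16 * (nPages - 1);    // placeholder sizes
  const size_t headerPages =
    ((headerBytes + kPageMask) & ~kPageMask) / kPageSize; // gChunkHeaderNumPages
  printf("gChunkNumPages=%zu gChunkHeaderNumPages=%zu gMaxLargeClass=%zu\n",
         nPages, headerPages, kChunk - (headerPages * kPageSize));
  return 0;
}

With these assumptions it prints gChunkNumPages=256, gChunkHeaderNumPages=2 and gMaxLargeClass=1040384, i.e. the chunk size minus two header pages.
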
@@ -2531,38 +2531,37 @@ arena_t::InitChunk(arena_chunk_t* aChunk
   aChunk->arena = this;
 
   // Claim that no pages are in use, since the header is merely overhead.
   aChunk->ndirty = 0;
 
   // Initialize the map to contain one maximal free untouched run.
 #ifdef MALLOC_DECOMMIT
   arena_run_t* run =
-    (arena_run_t*)(uintptr_t(aChunk) +
-                   (arena_chunk_header_npages << gPageSize2Pow));
+    (arena_run_t*)(uintptr_t(aChunk) + (gChunkHeaderNumPages << gPageSize2Pow));
 #endif
 
-  for (i = 0; i < arena_chunk_header_npages; i++) {
+  for (i = 0; i < gChunkHeaderNumPages; i++) {
     aChunk->map[i].bits = 0;
   }
   aChunk->map[i].bits = gMaxLargeClass | flags;
-  for (i++; i < chunk_npages - 1; i++) {
+  for (i++; i < gChunkNumPages - 1; i++) {
     aChunk->map[i].bits = flags;
   }
-  aChunk->map[chunk_npages - 1].bits = gMaxLargeClass | flags;
+  aChunk->map[gChunkNumPages - 1].bits = gMaxLargeClass | flags;
 
 #ifdef MALLOC_DECOMMIT
   // Start out decommitted, in order to force a closer correspondence
   // between dirty pages and committed untouched pages.
   pages_decommit(run, gMaxLargeClass);
 #endif
-  mStats.committed += arena_chunk_header_npages;
+  mStats.committed += gChunkHeaderNumPages;
 
   // Insert the run into the tree of available runs.
-  mRunsAvail.Insert(&aChunk->map[arena_chunk_header_npages]);
+  mRunsAvail.Insert(&aChunk->map[gChunkHeaderNumPages]);
 
 #ifdef MALLOC_DOUBLE_PURGE
   new (&aChunk->chunks_madvised_elem) DoublyLinkedListElement<arena_chunk_t>();
 #endif
 }
 
 void
 arena_t::DeallocChunk(arena_chunk_t* aChunk)
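
For reference, the per-page map laid out by InitChunk above ends up with zeroed entries for the header pages, and the single maximal free run recording its size (gMaxLargeClass) in both its first and last map entry. A toy sketch mirroring that loop structure, with assumed small values (8 pages per chunk, 2 header pages, stand-in size and flag constants):

// Toy sketch of InitChunk's map layout, with assumed geometry and constants.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kNumPages = 8, kHeaderPages = 2; // assumed geometry
  const size_t kMaxLarge = 0x6000;  // 6 usable 4 KiB pages, the gMaxLargeClass here
  const size_t kFlags = 0;          // stand-in for the zero/dirty flags
  size_t map[kNumPages];
  size_t i;
  for (i = 0; i < kHeaderPages; i++) {
    map[i] = 0;                               // header pages carry no run info
  }
  map[i] = kMaxLarge | kFlags;                // head of the single free run
  for (i++; i < kNumPages - 1; i++) {
    map[i] = kFlags;                          // interior pages
  }
  map[kNumPages - 1] = kMaxLarge | kFlags;    // tail also records the run size
  for (i = 0; i < kNumPages; i++) {
    printf("map[%zu] = 0x%zx\n", i, map[i]);
  }
  return 0;
}
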
@@ -2577,23 +2576,23 @@ arena_t::DeallocChunk(arena_chunk_t* aCh
 #ifdef MALLOC_DOUBLE_PURGE
     if (mChunksMAdvised.ElementProbablyInList(mSpare)) {
       mChunksMAdvised.remove(mSpare);
     }
 #endif
 
     chunk_dealloc((void*)mSpare, kChunkSize, ARENA_CHUNK);
     mStats.mapped -= kChunkSize;
-    mStats.committed -= arena_chunk_header_npages;
+    mStats.committed -= gChunkHeaderNumPages;
   }
 
   // Remove run from the tree of available runs, so that the arena does not use it.
   // Dirty page flushing only uses the tree of dirty chunks, so leaving this
   // chunk in the chunks_* trees is sufficient for that purpose.
-  mRunsAvail.Remove(&aChunk->map[arena_chunk_header_npages]);
+  mRunsAvail.Remove(&aChunk->map[gChunkHeaderNumPages]);
 
   mSpare = aChunk;
 }
 
 arena_run_t*
 arena_t::AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero)
 {
   arena_run_t* run;
@@ -2612,32 +2611,32 @@ arena_t::AllocRun(arena_bin_t* aBin, siz
       (uintptr_t(mapelm) - uintptr_t(chunk->map)) / sizeof(arena_chunk_map_t);
 
     run = (arena_run_t*)(uintptr_t(chunk) + (pageind << gPageSize2Pow));
   } else if (mSpare) {
     // Use the spare.
     arena_chunk_t* chunk = mSpare;
     mSpare = nullptr;
     run = (arena_run_t*)(uintptr_t(chunk) +
-                         (arena_chunk_header_npages << gPageSize2Pow));
+                         (gChunkHeaderNumPages << gPageSize2Pow));
     // Insert the run into the tree of available runs.
-    mRunsAvail.Insert(&chunk->map[arena_chunk_header_npages]);
+    mRunsAvail.Insert(&chunk->map[gChunkHeaderNumPages]);
   } else {
     // No usable runs.  Create a new chunk from which to allocate
     // the run.
     bool zeroed;
     arena_chunk_t* chunk =
       (arena_chunk_t*)chunk_alloc(kChunkSize, kChunkSize, false, &zeroed);
     if (!chunk) {
       return nullptr;
     }
 
     InitChunk(chunk, zeroed);
     run = (arena_run_t*)(uintptr_t(chunk) +
-                         (arena_chunk_header_npages << gPageSize2Pow));
+                         (gChunkHeaderNumPages << gPageSize2Pow));
   }
   // Update page map.
   return SplitRun(run, aSize, aLarge, aZero) ? run : nullptr;
 }
 
 void
 arena_t::Purge(bool aAll)
 {
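
AllocRun converts between map indices and run addresses purely by shifting with gPageSize2Pow; the first usable run in a fresh or spare chunk always starts gChunkHeaderNumPages pages past the chunk base. A small sketch of that arithmetic with assumed constants (a hypothetical chunk base, 4 KiB pages, 2 header pages):

// Sketch of the index/address arithmetic, with assumed constants.
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t kPageShift = 12;     // assumed gPageSize2Pow for 4 KiB pages
  const size_t kHeaderPages = 2;    // assumed gChunkHeaderNumPages
  uintptr_t chunkBase = 0x40000000; // illustrative, chunk-aligned address
  // The first usable run starts right after the header pages...
  uintptr_t run = chunkBase + (kHeaderPages << kPageShift);
  // ...and the page index of any in-chunk address is recovered by shifting back.
  size_t pageind = (run - chunkBase) >> kPageShift;
  printf("run=%#lx pageind=%zu\n", (unsigned long)run, pageind);
  return 0;
}
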
@@ -2660,30 +2659,30 @@ arena_t::Purge(bool aAll)
   // purged.
   while (mNumDirty > (dirty_max >> 1)) {
 #ifdef MALLOC_DOUBLE_PURGE
     bool madvised = false;
 #endif
     chunk = mChunksDirty.Last();
     MOZ_DIAGNOSTIC_ASSERT(chunk);
 
-    for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
-      MOZ_DIAGNOSTIC_ASSERT(i >= arena_chunk_header_npages);
+    for (i = gChunkNumPages - 1; chunk->ndirty > 0; i--) {
+      MOZ_DIAGNOSTIC_ASSERT(i >= gChunkHeaderNumPages);
 
       if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
 #ifdef MALLOC_DECOMMIT
         const size_t free_operation = CHUNK_MAP_DECOMMITTED;
 #else
         const size_t free_operation = CHUNK_MAP_MADVISED;
 #endif
         MOZ_ASSERT((chunk->map[i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) ==
                    0);
         chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
         // Find adjacent dirty run(s).
-        for (npages = 1; i > arena_chunk_header_npages &&
+        for (npages = 1; i > gChunkHeaderNumPages &&
                          (chunk->map[i - 1].bits & CHUNK_MAP_DIRTY);
              npages++) {
           i--;
           MOZ_ASSERT((chunk->map[i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) ==
                      0);
           chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
         }
         chunk->ndirty -= npages;
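
Purge walks the map backwards from gChunkNumPages - 1, stopping at the header, and coalesces adjacent dirty pages into one madvise/decommit operation. A condensed sketch of that backward coalescing on a plain array, using stand-in flag bits rather than the real CHUNK_MAP_* values:

// Condensed sketch of Purge's backward dirty-run coalescing, with stand-in
// flag bits and an assumed 8-page chunk with a 2-page header.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kDirty = 0x8, kMadvised = 0x4;   // stand-in flag bits
  const size_t kHeaderPages = 2, kNumPages = 8; // assumed chunk geometry
  size_t map[kNumPages] = { 0, 0, 0, kDirty, kDirty, 0, kDirty, 0 };
  size_t ndirty = 3;
  for (size_t i = kNumPages - 1; ndirty > 0; i--) {
    if (map[i] & kDirty) {
      map[i] ^= kMadvised | kDirty;             // flip dirty -> madvised
      size_t npages = 1;
      while (i > kHeaderPages && (map[i - 1] & kDirty)) {
        i--;                                    // extend the run backwards
        map[i] ^= kMadvised | kDirty;
        npages++;
      }
      ndirty -= npages;
      printf("madvised a %zu-page run starting at page %zu\n", npages, i);
    }
  }
  return 0;
}
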
@@ -2728,18 +2727,18 @@ arena_t::Purge(bool aAll)
 void
 arena_t::DallocRun(arena_run_t* aRun, bool aDirty)
 {
   arena_chunk_t* chunk;
   size_t size, run_ind, run_pages;
 
   chunk = GetChunkForPtr(aRun);
   run_ind = (size_t)((uintptr_t(aRun) - uintptr_t(chunk)) >> gPageSize2Pow);
-  MOZ_DIAGNOSTIC_ASSERT(run_ind >= arena_chunk_header_npages);
-  MOZ_DIAGNOSTIC_ASSERT(run_ind < chunk_npages);
+  MOZ_DIAGNOSTIC_ASSERT(run_ind >= gChunkHeaderNumPages);
+  MOZ_DIAGNOSTIC_ASSERT(run_ind < gChunkNumPages);
   if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0) {
     size = chunk->map[run_ind].bits & ~gPageSizeMask;
   } else {
     size = aRun->bin->mRunSize;
   }
   run_pages = (size >> gPageSize2Pow);
 
   // Mark pages as unallocated in the chunk map.
@@ -2764,17 +2763,17 @@ arena_t::DallocRun(arena_run_t* aRun, bo
       chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
     }
   }
   chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits & gPageSizeMask);
   chunk->map[run_ind + run_pages - 1].bits =
     size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);
 
   // Try to coalesce forward.
-  if (run_ind + run_pages < chunk_npages &&
+  if (run_ind + run_pages < gChunkNumPages &&
       (chunk->map[run_ind + run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
     size_t nrun_size = chunk->map[run_ind + run_pages].bits & ~gPageSizeMask;
 
     // Remove successor from tree of available runs; the coalesced run is
     // inserted later.
     mRunsAvail.Remove(&chunk->map[run_ind + run_pages]);
 
     size += nrun_size;
@@ -2784,17 +2783,17 @@ arena_t::DallocRun(arena_run_t* aRun, bo
       (chunk->map[run_ind + run_pages - 1].bits & ~gPageSizeMask) == nrun_size);
     chunk->map[run_ind].bits =
       size | (chunk->map[run_ind].bits & gPageSizeMask);
     chunk->map[run_ind + run_pages - 1].bits =
       size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);
   }
 
   // Try to coalesce backward.
-  if (run_ind > arena_chunk_header_npages &&
+  if (run_ind > gChunkHeaderNumPages &&
       (chunk->map[run_ind - 1].bits & CHUNK_MAP_ALLOCATED) == 0) {
     size_t prun_size = chunk->map[run_ind - 1].bits & ~gPageSizeMask;
 
     run_ind -= prun_size >> gPageSize2Pow;
 
     // Remove predecessor from tree of available runs; the coalesced run is
     // inserted later.
     mRunsAvail.Remove(&chunk->map[run_ind]);
@@ -2809,17 +2808,17 @@ arena_t::DallocRun(arena_run_t* aRun, bo
     chunk->map[run_ind + run_pages - 1].bits =
       size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);
   }
 
   // Insert into tree of available runs, now that coalescing is complete.
   mRunsAvail.Insert(&chunk->map[run_ind]);
 
   // Deallocate chunk if it is now completely unused.
-  if ((chunk->map[arena_chunk_header_npages].bits &
+  if ((chunk->map[gChunkHeaderNumPages].bits &
        (~gPageSizeMask | CHUNK_MAP_ALLOCATED)) == gMaxLargeClass) {
     DeallocChunk(chunk);
   }
 
   // Enforce mMaxDirty.
   if (mNumDirty > mMaxDirty) {
     Purge(false);
   }
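
The final check in DallocRun treats a chunk as wholly unused when the first map entry past the header describes an unallocated run of exactly gMaxLargeClass bytes. A sketch of that mask test with assumed values (4 KiB pages, gMaxLargeClass of 1 MiB minus two header pages, a stand-in CHUNK_MAP_ALLOCATED bit):

// Sketch of the "chunk is wholly free" test, with assumed constants.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kPageMask = 0xfff;     // assumed gPageSizeMask
  const size_t kAllocated = 0x1;      // stand-in for CHUNK_MAP_ALLOCATED
  const size_t kMaxLarge = 0xfe000;   // assumed gMaxLargeClass
  // Bits of map[gChunkHeaderNumPages]: a free run covering every usable byte.
  size_t firstUsable = kMaxLarge;
  bool whollyFree =
    (firstUsable & (~kPageMask | kAllocated)) == kMaxLarge;
  printf("wholly free: %s\n", whollyFree ? "yes" : "no");
  return 0;
}

The mask keeps only the size bits and the allocated bit, so the comparison succeeds exactly when the run is unallocated and spans every non-header page.
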
@@ -3402,17 +3401,17 @@ MozJemalloc::jemalloc_ptr_info(const voi
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
   }
 
   MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
 
   // Get the page number within the chunk.
   size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> gPageSize2Pow);
-  if (pageind < arena_chunk_header_npages) {
+  if (pageind < gChunkHeaderNumPages) {
     // Within the chunk header.
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
   }
 
   size_t mapbits = chunk->map[pageind].bits;
 
   if (!(mapbits & CHUNK_MAP_ALLOCATED)) {
@@ -3443,18 +3442,18 @@ MozJemalloc::jemalloc_ptr_info(const voi
       size = mapbits & ~gPageSizeMask;
       if (size != 0) {
         break;
       }
 
       // The following two return paths shouldn't occur in
       // practice unless there is heap corruption.
       pageind--;
-      MOZ_DIAGNOSTIC_ASSERT(pageind >= arena_chunk_header_npages);
-      if (pageind < arena_chunk_header_npages) {
+      MOZ_DIAGNOSTIC_ASSERT(pageind >= gChunkHeaderNumPages);
+      if (pageind < gChunkHeaderNumPages) {
         *aInfo = { TagUnknown, nullptr, 0 };
         return;
       }
 
       mapbits = chunk->map[pageind].bits;
       MOZ_DIAGNOSTIC_ASSERT(mapbits & CHUNK_MAP_LARGE);
       if (!(mapbits & CHUNK_MAP_LARGE)) {
         *aInfo = { TagUnknown, nullptr, 0 };
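
When jemalloc_ptr_info lands on an interior page of a large run, only the run's head entry carries a non-zero size, so the code walks back toward the header until it finds one. A sketch of that walk on an assumed map, with stand-in flag and size values:

// Sketch of the backwards walk to a large run's head entry; the map contents
// and constants here are assumed, not taken from the real allocator state.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kPageMask = 0xfff, kLarge = 0x2; // stand-ins
  const size_t kHeaderPages = 2;                // assumed gChunkHeaderNumPages
  // A 3-page large run starting at page 4; interior pages keep the LARGE bit
  // but record size 0.
  size_t map[8] = { 0, 0, 0x1000 | kLarge, 0, 0x3000 | kLarge, kLarge, kLarge, 0 };
  size_t pageind = 6;                           // page the pointer landed in
  size_t size = 0;
  while (pageind >= kHeaderPages) {
    size = map[pageind] & ~kPageMask;
    if (size != 0) {
      break;                                    // found the run head
    }
    pageind--;
  }
  printf("run head at page %zu, size 0x%zx\n", pageind, size);
  return 0;
}
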
@@ -3662,17 +3661,17 @@ arena_t::RallocGrowLarge(arena_chunk_t* 
   size_t npages = aOldSize >> gPageSize2Pow;
 
   MutexAutoLock lock(mLock);
   MOZ_DIAGNOSTIC_ASSERT(aOldSize ==
                         (aChunk->map[pageind].bits & ~gPageSizeMask));
 
   // Try to extend the run.
   MOZ_ASSERT(aSize > aOldSize);
-  if (pageind + npages < chunk_npages &&
+  if (pageind + npages < gChunkNumPages &&
       (aChunk->map[pageind + npages].bits & CHUNK_MAP_ALLOCATED) == 0 &&
       (aChunk->map[pageind + npages].bits & ~gPageSizeMask) >=
         aSize - aOldSize) {
     // The next run is available and sufficiently large.  Split the
     // following run, then merge the first part with the existing
     // allocation.
     if (!SplitRun((arena_run_t*)(uintptr_t(aChunk) +
                                  ((pageind + npages) << gPageSize2Pow)),
@@ -4599,17 +4598,17 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
     aStats->bin_unused += arena_unused;
     aStats->bookkeeping += arena_headers;
     aStats->narenas++;
   }
   gArenas.mLock.Unlock();
 
   // Account for arena chunk headers in bookkeeping rather than waste.
   chunk_header_size =
-    ((aStats->mapped / aStats->chunksize) * arena_chunk_header_npages)
+    ((aStats->mapped / aStats->chunksize) * gChunkHeaderNumPages)
     << gPageSize2Pow;
 
   aStats->mapped += non_arena_mapped;
   aStats->bookkeeping += chunk_header_size;
   aStats->waste -= chunk_header_size;
 
   MOZ_ASSERT(aStats->mapped >= aStats->allocated + aStats->waste +
                                  aStats->page_cache + aStats->bookkeeping);
@@ -4617,21 +4616,21 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
 
 #ifdef MALLOC_DOUBLE_PURGE
 
 // Explicitly remove all of this chunk's MADV_FREE'd pages from memory.
 static void
 hard_purge_chunk(arena_chunk_t* aChunk)
 {
   // See similar logic in arena_t::Purge().
-  for (size_t i = arena_chunk_header_npages; i < chunk_npages; i++) {
+  for (size_t i = gChunkHeaderNumPages; i < gChunkNumPages; i++) {
     // Find all adjacent pages with CHUNK_MAP_MADVISED set.
     size_t npages;
     for (npages = 0; aChunk->map[i + npages].bits & CHUNK_MAP_MADVISED &&
-                     i + npages < chunk_npages;
+                     i + npages < gChunkNumPages;
          npages++) {
       // Turn off the chunk's MADV_FREED bit and turn on its
       // DECOMMITTED bit.
       MOZ_DIAGNOSTIC_ASSERT(
         !(aChunk->map[i + npages].bits & CHUNK_MAP_DECOMMITTED));
       aChunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
     }