Bug 1413475 - Replace SIZEOF_INT_2POW with LOG2(sizeof(int)). r?njn draft
author Mike Hommey <mh+mozilla@glandium.org>
Wed, 01 Nov 2017 16:47:59 +0900
changeset 690339 e10b6526a60163f55bd82a352ba691d5731774f7
parent 690338 421035e0251414adbf98ea93c7a37fd8566a3ca4
child 690340 c530f8b5daa08004cde7f1ab75f071224d7406c1
push id 87285
push user bmo:mh+mozilla@glandium.org
push date Wed, 01 Nov 2017 22:04:01 +0000
reviewers njn
bugs 1413475
milestone 58.0a1
Bug 1413475 - Replace SIZEOF_INT_2POW with LOG2(sizeof(int)). r?njn
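For context: LOG2 here is the allocator's compile-time log2 helper, so for the common case of a 4-byte int, LOG2(sizeof(int)) evaluates to 2, exactly the old SIZEOF_INT_2POW default. A minimal sketch of the equivalence, assuming a constexpr floor-log2 (the helper actually used in the tree may be spelled differently):

    #include <cstddef>

    // Hypothetical stand-in for the tree's LOG2 macro: compile-time
    // floor(log2(n)).
    constexpr unsigned Log2(size_t n) { return n <= 1 ? 0 : 1 + Log2(n / 2); }

    // On platforms with 4-byte ints this matches the old default of 2,
    // and Log2(sizeof(int)) + 3 is log2 of the number of bits in an int.
    static_assert(Log2(sizeof(int)) == 2, "assumes a 4-byte int");
    static_assert((1U << (Log2(sizeof(int)) + 3)) == sizeof(int) * 8,
                  "bits per int");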
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -247,21 +247,16 @@ static inline void*
 #define mmap _mmap
 #define munmap(a, l) syscall(SYS_munmap, a, l)
 #endif
 #endif
 
 // Minimum alignment of non-tiny allocations is 2^QUANTUM_2POW_MIN bytes.
 #define QUANTUM_2POW_MIN 4
 
-// sizeof(int) == (1U << SIZEOF_INT_2POW).
-#ifndef SIZEOF_INT_2POW
-#define SIZEOF_INT_2POW 2
-#endif
-
 // Size and alignment of memory chunks that are allocated by the OS's virtual
 // memory system.
 #define CHUNK_2POW_DEFAULT 20
 // Maximum number of dirty pages per arena.
 #define DIRTY_MAX_DEFAULT (1U << 8)
 
 static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
 
@@ -2222,17 +2217,17 @@ arena_run_reg_alloc(arena_run_t* run, ar
   // be updated unconditionally, without the possibility of updating it
   // multiple times.
   i = run->regs_minelm;
   mask = run->regs_mask[i];
   if (mask != 0) {
     // Usable allocation found.
     bit = CountTrailingZeroes32(mask);
 
-    regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
+    regind = ((i << (LOG2(sizeof(int)) + 3)) + bit);
     MOZ_ASSERT(regind < bin->nregs);
     ret =
       (void*)(((uintptr_t)run) + bin->reg0_offset + (bin->reg_size * regind));
 
     // Clear bit.
     mask ^= (1U << bit);
     run->regs_mask[i] = mask;
 
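Since each regs_mask element is an unsigned int, one word tracks sizeof(int) * 8 regions, and LOG2(sizeof(int)) + 3 is log2 of that bit count (the + 3 being log2 of the 8 bits per byte). A worked example of the index recovery in the hunk above, under the usual 4-byte-int assumption:

    // Word index 2, bit 7 within the word: with 32-bit words the shift
    // amount is 2 + 3 == 5, so the region index is 2 * 32 + 7 == 71.
    unsigned i = 2, bit = 7;
    unsigned regind = (i << (2 + 3)) + bit; // 71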
@@ -2240,17 +2235,17 @@ arena_run_reg_alloc(arena_run_t* run, ar
   }
 
   for (i++; i < bin->regs_mask_nelms; i++) {
     mask = run->regs_mask[i];
     if (mask != 0) {
       // Usable allocation found.
       bit = CountTrailingZeroes32(mask);
 
-      regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
+      regind = ((i << (LOG2(sizeof(int)) + 3)) + bit);
       MOZ_ASSERT(regind < bin->nregs);
       ret =
         (void*)(((uintptr_t)run) + bin->reg0_offset + (bin->reg_size * regind));
 
       // Clear bit.
       mask ^= (1U << bit);
       run->regs_mask[i] = mask;
 
@@ -2348,21 +2343,21 @@ arena_run_reg_dalloc(arena_run_t* run, a
     // calculate regind using actual division.  This only happens
     // if the user increases small_max via the 'S' runtime
     // configuration option.
     regind = diff / size;
   };
   MOZ_DIAGNOSTIC_ASSERT(diff == regind * size);
   MOZ_DIAGNOSTIC_ASSERT(regind < bin->nregs);
 
-  elm = regind >> (SIZEOF_INT_2POW + 3);
+  elm = regind >> (LOG2(sizeof(int)) + 3);
   if (elm < run->regs_minelm) {
     run->regs_minelm = elm;
   }
-  bit = regind - (elm << (SIZEOF_INT_2POW + 3));
+  bit = regind - (elm << (LOG2(sizeof(int)) + 3));
   MOZ_DIAGNOSTIC_ASSERT((run->regs_mask[elm] & (1U << bit)) == 0);
   run->regs_mask[elm] |= (1U << bit);
 #undef SIZE_INV
 #undef SIZE_INV_SHIFT
 }
 
 bool
 arena_t::SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero)
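arena_run_reg_dalloc, in the hunk above, inverts that mapping: the right shift recovers the mask word and the subtraction recovers the bit, i.e. regind / 32 and regind % 32 for 4-byte ints. Continuing the hypothetical example from above:

    // For regind == 71 with 32-bit mask words:
    unsigned regind = 71;
    unsigned elm = regind >> (2 + 3);         // 71 / 32 == 2
    unsigned bit = regind - (elm << (2 + 3)); // 71 % 32 == 7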
@@ -2859,23 +2854,23 @@ arena_t::GetNonFullBinRun(arena_bin_t* a
   }
 
   // Initialize run internals.
   run->bin = aBin;
 
   for (i = 0; i < aBin->regs_mask_nelms - 1; i++) {
     run->regs_mask[i] = UINT_MAX;
   }
-  remainder = aBin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
+  remainder = aBin->nregs & ((1U << (LOG2(sizeof(int)) + 3)) - 1);
   if (remainder == 0) {
     run->regs_mask[i] = UINT_MAX;
   } else {
     // The last element has spare bits that need to be unset.
     run->regs_mask[i] =
-      (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3)) - remainder));
+      (UINT_MAX >> ((1U << (LOG2(sizeof(int)) + 3)) - remainder));
   }
 
   run->regs_minelm = 0;
 
   run->nfree = aBin->nregs;
 #if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
   run->magic = ARENA_RUN_MAGIC;
 #endif
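The remainder computation in GetNonFullBinRun above decides how many bits of the final mask word name real regions; shifting UINT_MAX right by (1U << (LOG2(sizeof(int)) + 3)) - remainder then clears the spare high bits. A hypothetical example with 32-bit mask words:

    #include <limits.h>
    // nregs == 40: the last word covers only 8 regions, so keep the low
    // 8 bits set and clear the upper 24.
    unsigned remainder = 40u & ((1U << 5) - 1);               // 8
    unsigned last_mask = UINT_MAX >> ((1U << 5) - remainder); // 0x000000FF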
@@ -2942,18 +2937,18 @@ arena_bin_run_size_calc(arena_bin_t* bin
   // would be quite messy, since there is an interdependency between the
   // header's mask length and the number of regions.
   try_run_size = min_run_size;
   try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->reg_size) +
               1; // Counter-act try_nregs-- in loop.
   do {
     try_nregs--;
     try_mask_nelms =
-      (try_nregs >> (SIZEOF_INT_2POW + 3)) +
-      ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
+      (try_nregs >> (LOG2(sizeof(int)) + 3)) +
+      ((try_nregs & ((1U << (LOG2(sizeof(int)) + 3)) - 1)) ? 1 : 0);
     try_reg0_offset = try_run_size - (try_nregs * bin->reg_size);
   } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1)) >
            try_reg0_offset);
 
   // run_size expansion loop.
   do {
     // Copy valid settings before trying more aggressive settings.
     good_run_size = try_run_size;
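The try_mask_nelms expression above is a ceiling division of try_nregs by the bits per mask word: the shift yields the whole words, and the masked remainder adds one more word when the region count is not a multiple of 32 (for 4-byte ints). For instance:

    // ceil(nregs / 32): 40 regions need two mask words, 64 need exactly two.
    unsigned nelms40 = (40 >> 5) + ((40 & 31) ? 1 : 0); // 2
    unsigned nelms64 = (64 >> 5) + ((64 & 31) ? 1 : 0); // 2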
@@ -2963,28 +2958,28 @@ arena_bin_run_size_calc(arena_bin_t* bin
 
     // Try more aggressive settings.
     try_run_size += pagesize;
     try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->reg_size) +
                 1; // Counter-act try_nregs-- in loop.
     do {
       try_nregs--;
       try_mask_nelms =
-        (try_nregs >> (SIZEOF_INT_2POW + 3)) +
-        ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
+        (try_nregs >> (LOG2(sizeof(int)) + 3)) +
+        ((try_nregs & ((1U << (LOG2(sizeof(int)) + 3)) - 1)) ? 1 : 0);
       try_reg0_offset = try_run_size - (try_nregs * bin->reg_size);
     } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1)) >
              try_reg0_offset);
   } while (try_run_size <= arena_maxclass &&
            RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX &&
            (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
 
   MOZ_ASSERT(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1)) <=
              good_reg0_offset);
-  MOZ_ASSERT((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
+  MOZ_ASSERT((good_mask_nelms << (LOG2(sizeof(int)) + 3)) >= good_nregs);
 
   // Copy final settings.
   bin->run_size = good_run_size;
   bin->nregs = good_nregs;
   bin->regs_mask_nelms = good_mask_nelms;
   bin->reg0_offset = good_reg0_offset;
 
   return good_run_size;
@@ -3448,18 +3443,18 @@ MozJemalloc::jemalloc_ptr_info(const voi
 
   // Position in the run.
   unsigned regind = ((uintptr_t)aPtr - reg0_addr) / size;
 
   // Pointer to the allocation's base address.
   void* addr = (void*)(reg0_addr + regind * size);
 
   // Check if the allocation has been freed.
-  unsigned elm = regind >> (SIZEOF_INT_2POW + 3);
-  unsigned bit = regind - (elm << (SIZEOF_INT_2POW + 3));
+  unsigned elm = regind >> (LOG2(sizeof(int)) + 3);
+  unsigned bit = regind - (elm << (LOG2(sizeof(int)) + 3));
   PtrInfoTag tag =
     ((run->regs_mask[elm] & (1U << bit))) ? TagFreedSmall : TagLiveSmall;
 
   *aInfo = { tag, addr, size };
 }
 
 namespace Debug {
 // Helper for debuggers. We don't want it to be inlined and optimized out.