Bug 1414155 - Move a few things around. r?njn draft
author Mike Hommey <mh+mozilla@glandium.org>
Wed, 01 Nov 2017 19:29:36 +0900
changeset 693240 bab4c5597518a6e1240f1b517224b4b8658a2f60
parent 693006 66f496680fae6e7d8f02bc17ff58b9234ee07c70
child 693241 14dfed6a24090042718bb213d1fd8878a9169e75
push id 87735
push user bmo:mh+mozilla@glandium.org
push date Sat, 04 Nov 2017 22:08:08 +0000
reviewers njn
bugs 1414155
milestone 58.0a1
Bug 1414155 - Move a few things around. r?njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -249,20 +249,16 @@ static inline void*
 #endif
 
 // Minimum alignment of non-tiny allocations is 2^QUANTUM_2POW_MIN bytes.
 #define QUANTUM_2POW_MIN 4
 
 // Size and alignment of memory chunks that are allocated by the OS's virtual
 // memory system.
 #define CHUNK_2POW_DEFAULT 20
-// Maximum number of dirty pages per arena.
-#define DIRTY_MAX_DEFAULT (1U << 8)
-
-static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
 
 // Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
 // so over-estimates are okay (up to a point), but under-estimates will
 // negatively affect performance.
 #define CACHELINE_2POW 6
 #define CACHELINE ((size_t)(1U << CACHELINE_2POW))
 
 // Smallest size class to support.  On Windows the smallest allocation size
@@ -275,35 +271,16 @@ static size_t opt_dirty_max = DIRTY_MAX_
 #endif
 
 // Maximum size class that is a multiple of the quantum, but not (necessarily)
 // a power of 2.  Above this size, allocations are rounded up to the nearest
 // power of 2.
 #define SMALL_MAX_2POW_DEFAULT 9
 #define SMALL_MAX_DEFAULT (1U << SMALL_MAX_2POW_DEFAULT)
 
-// RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
-// as small as possible such that this setting is still honored, without
-// violating other constraints.  The goal is to make runs as small as possible
-// without exceeding a per run external fragmentation threshold.
-//
-// We use binary fixed point math for overhead computations, where the binary
-// point is implicitly RUN_BFP bits to the left.
-//
-// Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
-// honored for some/all object sizes, since there is one bit of header overhead
-// per object (plus a constant).  This constraint is relaxed (ignored) for runs
-// that are so small that the per-region overhead is greater than:
-//
-//   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP))
-#define RUN_BFP 12
-//                                    \/   Implicit binary fixed point.
-#define RUN_MAX_OVRHD 0x0000003dU
-#define RUN_MAX_OVRHD_RELAX 0x00001800U
-
 // When MALLOC_STATIC_PAGESIZE is defined, the page size is fixed at
 // compile-time for better performance, as opposed to determined at
 // runtime. Some platforms can have different page sizes at runtime
 // depending on kernel configuration, so they are opted out by default.
 // Debug builds are opted out too, for test coverage.
 #ifndef MOZ_DEBUG
 #if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) &&         \
   !defined(__aarch64__)
@@ -396,16 +373,52 @@ static size_t arena_maxclass; // Max siz
 // 6.25% of the process address space on a 32-bit OS for later use.
 #define CHUNK_RECYCLE_LIMIT 128
 
 static const size_t gRecycleLimit = CHUNK_RECYCLE_LIMIT * CHUNKSIZE_DEFAULT;
 
 // The current amount of recycled bytes, updated atomically.
 static Atomic<size_t, ReleaseAcquire> gRecycledSize;
 
+// Maximum number of dirty pages per arena.
+#define DIRTY_MAX_DEFAULT (1U << 8)
+
+static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
+
+// RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
+// as small as possible such that this setting is still honored, without
+// violating other constraints.  The goal is to make runs as small as possible
+// without exceeding a per run external fragmentation threshold.
+//
+// We use binary fixed point math for overhead computations, where the binary
+// point is implicitly RUN_BFP bits to the left.
+//
+// Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
+// honored for some/all object sizes, since there is one bit of header overhead
+// per object (plus a constant).  This constraint is relaxed (ignored) for runs
+// that are so small that the per-region overhead is greater than:
+//
+//   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP))
+#define RUN_BFP 12
+//                                    \/   Implicit binary fixed point.
+#define RUN_MAX_OVRHD 0x0000003dU
+#define RUN_MAX_OVRHD_RELAX 0x00001800U
+
+// Return the smallest chunk multiple that is >= s.
+#define CHUNK_CEILING(s) (((s) + chunksize_mask) & ~chunksize_mask)
+
+// Return the smallest cacheline multiple that is >= s.
+#define CACHELINE_CEILING(s) (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
+
+// Return the smallest quantum multiple that is >= a.
+#define QUANTUM_CEILING(a) (((a) + quantum_mask) & ~quantum_mask)
+
+// Return the smallest pagesize multiple that is >= s.
+#define PAGE_CEILING(s) (((s) + pagesize_mask) & ~pagesize_mask)
+
 // ***************************************************************************
 // MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
 #error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #endif
 
 static void*
 base_alloc(size_t aSize);
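
The RUN_BFP / RUN_MAX_OVRHD block relocated in the hunk above uses binary fixed point: with the binary point RUN_BFP (= 12) bits to the left, the integer 0x3d (= 61) encodes a maximum header overhead of 61/4096, roughly 1.5%. The following standalone sketch is not part of the patch; OverheadOk and the sample sizes are made-up illustrations of how such a threshold can be checked with integer cross-multiplication rather than floating point, which is the general idea behind the allocator's run-size calculation.

#include <cstddef>
#include <cstdio>

static const unsigned kRunBFP = 12;          // mirrors RUN_BFP
static const unsigned kRunMaxOvrhd = 0x3dU;  // mirrors RUN_MAX_OVRHD (61/4096 ~= 1.5%)

// True if aHeaderSize / aRunSize <= kRunMaxOvrhd / 2^kRunBFP.  Shifting the
// header size left by kRunBFP and comparing against kRunMaxOvrhd * aRunSize
// keeps everything in integers, matching the fixed point convention described
// in the comment.
static bool
OverheadOk(size_t aHeaderSize, size_t aRunSize)
{
  return (aHeaderSize << kRunBFP) <= kRunMaxOvrhd * aRunSize;
}

int
main()
{
  printf("%d\n", OverheadOk(48, 4096)); // 48/4096 ~= 1.2% -> 1 (within target)
  printf("%d\n", OverheadOk(48, 2048)); // 48/2048 ~= 2.3% -> 0 (over target)
  return 0;
}
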
@@ -1312,28 +1325,16 @@ GetChunkForPtr(const void* aPtr)
 
 // Return the chunk offset of address a.
 static inline size_t
 GetChunkOffsetForPtr(const void* aPtr)
 {
   return (size_t)(uintptr_t(aPtr) & chunksize_mask);
 }
 
-// Return the smallest chunk multiple that is >= s.
-#define CHUNK_CEILING(s) (((s) + chunksize_mask) & ~chunksize_mask)
-
-// Return the smallest cacheline multiple that is >= s.
-#define CACHELINE_CEILING(s) (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
-
-// Return the smallest quantum multiple that is >= a.
-#define QUANTUM_CEILING(a) (((a) + quantum_mask) & ~quantum_mask)
-
-// Return the smallest pagesize multiple that is >= s.
-#define PAGE_CEILING(s) (((s) + pagesize_mask) & ~pagesize_mask)
-
 static inline const char*
 _getprogname(void)
 {
 
   return "<jemalloc>";
 }
 
 // ***************************************************************************
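
For reference, the *_CEILING macros that the last hunk relocates all rely on the same power-of-two rounding trick: adding (alignment - 1) pushes any unaligned value past the next boundary, and clearing the low bits truncates back down onto it. A minimal standalone sketch follows; it is not part of the patch, and RoundUp plus the constants below are hypothetical stand-ins for chunksize_mask and CACHELINE.

#include <cstddef>
#include <cstdio>

static const size_t kCacheline = size_t(1) << 6;             // mirrors CACHELINE (64 bytes)
static const size_t kChunkSizeMask = (size_t(1) << 20) - 1;  // mirrors chunksize_mask for 1 MiB chunks

// Equivalent of CHUNK_CEILING/CACHELINE_CEILING/QUANTUM_CEILING/PAGE_CEILING:
// round aSize up to the next multiple of a power-of-two alignment, where the
// alignment is expressed as a mask (alignment - 1).
static size_t
RoundUp(size_t aSize, size_t aAlignMask)
{
  return (aSize + aAlignMask) & ~aAlignMask;
}

int
main()
{
  printf("%zu\n", RoundUp(100, kCacheline - 1));            // 128
  printf("%zu\n", RoundUp((1 << 20) + 1, kChunkSizeMask));  // 2097152 (2 MiB)
  return 0;
}
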