Bug 1414155 - Move arena_chunk_map_t and arena_chunk_t around. r?njn draft
authorMike Hommey <mh+mozilla@glandium.org>
Wed, 01 Nov 2017 17:54:31 +0900
changeset 693242 20588eebdfb5223a3d131c1eaa71f4afebd3de10
parent 693241 14dfed6a24090042718bb213d1fd8878a9169e75
child 693243 a4f2841b47169424aec83fc951092b8accff4614
push id87735
push userbmo:mh+mozilla@glandium.org
push dateSat, 04 Nov 2017 22:08:08 +0000
reviewersnjn
bugs1414155
milestone58.0a1
Bug 1414155 - Move arena_chunk_map_t and arena_chunk_t around. r?njn At the moment, while they are used before their declaration, it's from a macro. It is desirable to replace the macros with C++ constants, which will require the structures to be defined first.
memory/build/Utils.h
memory/build/mozjemalloc.cpp
--- a/memory/build/Utils.h
+++ b/memory/build/Utils.h
@@ -13,9 +13,21 @@
 template<size_t N>
 struct Log2 : mozilla::tl::CeilingLog2<N>
 {
   using mozilla::tl::CeilingLog2<N>::value;
   static_assert(1ULL << value == N, "Number is not a power of 2");
 };
 #define LOG2(N) Log2<N>::value
 
+// Compare two addresses. Returns whether the first address is smaller (-1),
+// equal (0) or greater (1) than the second address.
+template<typename T>
+int
+CompareAddr(T* aAddr1, T* aAddr2)
+{
+  uintptr_t addr1 = reinterpret_cast<uintptr_t>(aAddr1);
+  uintptr_t addr2 = reinterpret_cast<uintptr_t>(aAddr2);
+
+  return (addr1 > addr2) - (addr1 < addr2);
+}
+
 #endif
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -243,16 +243,128 @@ static inline void*
 #endif
 #endif
 }
 #define mmap _mmap
 #define munmap(a, l) syscall(SYS_munmap, a, l)
 #endif
 #endif
 
+// ***************************************************************************
+// Structures for chunk headers for chunks used for non-huge allocations.
+
+struct arena_t;
+
+// Each element of the chunk map corresponds to one page within the chunk.
+struct arena_chunk_map_t
+{
+  // Linkage for run trees.  There are two disjoint uses:
+  //
+  // 1) arena_t's tree of available runs.
+  // 2) arena_run_t conceptually uses this linkage for in-use non-full
+  //    runs, rather than directly embedding linkage.
+  RedBlackTreeNode<arena_chunk_map_t> link;
+
+  // Run address (or size) and various flags are stored together.  The bit
+  // layout looks like (assuming 32-bit system):
+  //
+  //   ???????? ???????? ????---- -mckdzla
+  //
+  // ? : Unallocated: Run address for first/last pages, unset for internal
+  //                  pages.
+  //     Small: Run address.
+  //     Large: Run size for first page, unset for trailing pages.
+  // - : Unused.
+  // m : MADV_FREE/MADV_DONTNEED'ed?
+  // c : decommitted?
+  // k : key?
+  // d : dirty?
+  // z : zeroed?
+  // l : large?
+  // a : allocated?
+  //
+  // Following are example bit patterns for the three types of runs.
+  //
+  // r : run address
+  // s : run size
+  // x : don't care
+  // - : 0
+  // [cdzla] : bit set
+  //
+  //   Unallocated:
+  //     ssssssss ssssssss ssss---- --c-----
+  //     xxxxxxxx xxxxxxxx xxxx---- ----d---
+  //     ssssssss ssssssss ssss---- -----z--
+  //
+  //   Small:
+  //     rrrrrrrr rrrrrrrr rrrr---- -------a
+  //     rrrrrrrr rrrrrrrr rrrr---- -------a
+  //     rrrrrrrr rrrrrrrr rrrr---- -------a
+  //
+  //   Large:
+  //     ssssssss ssssssss ssss---- ------la
+  //     -------- -------- -------- ------la
+  //     -------- -------- -------- ------la
+  size_t bits;
+
+// Note that CHUNK_MAP_DECOMMITTED's meaning varies depending on whether
+// MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are defined.
+//
+// If MALLOC_DECOMMIT is defined, a page which is CHUNK_MAP_DECOMMITTED must be
+// re-committed with pages_commit() before it may be touched.  If
+// MALLOC_DECOMMIT is defined, MALLOC_DOUBLE_PURGE may not be defined.
+//
+// If neither MALLOC_DECOMMIT nor MALLOC_DOUBLE_PURGE is defined, pages which
+// are madvised (with either MADV_DONTNEED or MADV_FREE) are marked with
+// CHUNK_MAP_MADVISED.
+//
+// Otherwise, if MALLOC_DECOMMIT is not defined and MALLOC_DOUBLE_PURGE is
+// defined, then a page which is madvised is marked as CHUNK_MAP_MADVISED.
+// When it's finally freed with jemalloc_purge_freed_pages, the page is marked
+// as CHUNK_MAP_DECOMMITTED.
+#define CHUNK_MAP_MADVISED ((size_t)0x40U)
+#define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
+#define CHUNK_MAP_MADVISED_OR_DECOMMITTED                                      \
+  (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
+#define CHUNK_MAP_KEY ((size_t)0x10U)
+#define CHUNK_MAP_DIRTY ((size_t)0x08U)
+#define CHUNK_MAP_ZEROED ((size_t)0x04U)
+#define CHUNK_MAP_LARGE ((size_t)0x02U)
+#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
+};
+
+// Arena chunk header.
+struct arena_chunk_t
+{
+  // Arena that owns the chunk.
+  arena_t* arena;
+
+  // Linkage for the arena's tree of dirty chunks.
+  RedBlackTreeNode<arena_chunk_t> link_dirty;
+
+#ifdef MALLOC_DOUBLE_PURGE
+  // If we're double-purging, we maintain a linked list of chunks which
+  // have pages which have been madvise(MADV_FREE)'d but not explicitly
+  // purged.
+  //
+  // We're currently lazy and don't remove a chunk from this list when
+  // all its madvised pages are recommitted.
+  DoublyLinkedListElement<arena_chunk_t> chunks_madvised_elem;
+#endif
+
+  // Number of dirty pages.
+  size_t ndirty;
+
+  // Map of pages within chunk that keeps track of free/large/small.
+  arena_chunk_map_t map[1]; // Dynamically sized.
+};
+
+// ***************************************************************************
+// Constants defining allocator size classes and behavior.
+
 // Minimum alignment of non-tiny allocations is 2^QUANTUM_2POW_MIN bytes.
 #define QUANTUM_2POW_MIN 4
 
 // Size and alignment of memory chunks that are allocated by the OS's virtual
 // memory system.
 #define CHUNK_2POW_DEFAULT 20
 
 // Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
@@ -515,26 +627,16 @@ struct extent_node_t
 
   // Total region size.
   size_t size;
 
   // What type of chunk is there; used by chunk recycling code.
   ChunkType chunk_type;
 };
 
-template<typename T>
-int
-CompareAddr(T* aAddr1, T* aAddr2)
-{
-  uintptr_t addr1 = reinterpret_cast<uintptr_t>(aAddr1);
-  uintptr_t addr2 = reinterpret_cast<uintptr_t>(aAddr2);
-
-  return (addr1 > addr2) - (addr1 < addr2);
-}
-
 struct ExtentTreeSzTrait
 {
   static RedBlackTreeNode<extent_node_t>& GetTreeNode(extent_node_t* aThis)
   {
     return aThis->link_szad;
   }
 
   static inline int Compare(extent_node_t* aNode, extent_node_t* aOther)
@@ -663,97 +765,18 @@ public:
 
 private:
   inline void** GetSlot(void* aAddr, bool aCreate = false);
 };
 
 // ***************************************************************************
 // Arena data structures.
 
-struct arena_t;
 struct arena_bin_t;
 
-// Each element of the chunk map corresponds to one page within the chunk.
-struct arena_chunk_map_t
-{
-  // Linkage for run trees.  There are two disjoint uses:
-  //
-  // 1) arena_t's tree or available runs.
-  // 2) arena_run_t conceptually uses this linkage for in-use non-full
-  //    runs, rather than directly embedding linkage.
-  RedBlackTreeNode<arena_chunk_map_t> link;
-
-  // Run address (or size) and various flags are stored together.  The bit
-  // layout looks like (assuming 32-bit system):
-  //
-  //   ???????? ???????? ????---- -mckdzla
-  //
-  // ? : Unallocated: Run address for first/last pages, unset for internal
-  //                  pages.
-  //     Small: Run address.
-  //     Large: Run size for first page, unset for trailing pages.
-  // - : Unused.
-  // m : MADV_FREE/MADV_DONTNEED'ed?
-  // c : decommitted?
-  // k : key?
-  // d : dirty?
-  // z : zeroed?
-  // l : large?
-  // a : allocated?
-  //
-  // Following are example bit patterns for the three types of runs.
-  //
-  // r : run address
-  // s : run size
-  // x : don't care
-  // - : 0
-  // [cdzla] : bit set
-  //
-  //   Unallocated:
-  //     ssssssss ssssssss ssss---- --c-----
-  //     xxxxxxxx xxxxxxxx xxxx---- ----d---
-  //     ssssssss ssssssss ssss---- -----z--
-  //
-  //   Small:
-  //     rrrrrrrr rrrrrrrr rrrr---- -------a
-  //     rrrrrrrr rrrrrrrr rrrr---- -------a
-  //     rrrrrrrr rrrrrrrr rrrr---- -------a
-  //
-  //   Large:
-  //     ssssssss ssssssss ssss---- ------la
-  //     -------- -------- -------- ------la
-  //     -------- -------- -------- ------la
-  size_t bits;
-
-// Note that CHUNK_MAP_DECOMMITTED's meaning varies depending on whether
-// MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are defined.
-//
-// If MALLOC_DECOMMIT is defined, a page which is CHUNK_MAP_DECOMMITTED must be
-// re-committed with pages_commit() before it may be touched.  If
-// MALLOC_DECOMMIT is defined, MALLOC_DOUBLE_PURGE may not be defined.
-//
-// If neither MALLOC_DECOMMIT nor MALLOC_DOUBLE_PURGE is defined, pages which
-// are madvised (with either MADV_DONTNEED or MADV_FREE) are marked with
-// CHUNK_MAP_MADVISED.
-//
-// Otherwise, if MALLOC_DECOMMIT is not defined and MALLOC_DOUBLE_PURGE is
-// defined, then a page which is madvised is marked as CHUNK_MAP_MADVISED.
-// When it's finally freed with jemalloc_purge_freed_pages, the page is marked
-// as CHUNK_MAP_DECOMMITTED.
-#define CHUNK_MAP_MADVISED ((size_t)0x40U)
-#define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
-#define CHUNK_MAP_MADVISED_OR_DECOMMITTED                                      \
-  (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
-#define CHUNK_MAP_KEY ((size_t)0x10U)
-#define CHUNK_MAP_DIRTY ((size_t)0x08U)
-#define CHUNK_MAP_ZEROED ((size_t)0x04U)
-#define CHUNK_MAP_LARGE ((size_t)0x02U)
-#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
-};
-
 struct ArenaChunkMapLink
 {
   static RedBlackTreeNode<arena_chunk_map_t>& GetTreeNode(
     arena_chunk_map_t* aThis)
   {
     return aThis->link;
   }
 };
@@ -776,42 +799,16 @@ struct ArenaAvailTreeTrait : public Aren
     size_t size2 = aOther->bits & ~pagesize_mask;
     int ret = (size1 > size2) - (size1 < size2);
     return ret ? ret
                : CompareAddr((aNode->bits & CHUNK_MAP_KEY) ? nullptr : aNode,
                              aOther);
   }
 };
 
-// Arena chunk header.
-struct arena_chunk_t
-{
-  // Arena that owns the chunk.
-  arena_t* arena;
-
-  // Linkage for the arena's tree of dirty chunks.
-  RedBlackTreeNode<arena_chunk_t> link_dirty;
-
-#ifdef MALLOC_DOUBLE_PURGE
-  // If we're double-purging, we maintain a linked list of chunks which
-  // have pages which have been madvise(MADV_FREE)'d but not explicitly
-  // purged.
-  //
-  // We're currently lazy and don't remove a chunk from this list when
-  // all its madvised pages are recommitted.
-  DoublyLinkedListElement<arena_chunk_t> chunks_madvised_elem;
-#endif
-
-  // Number of dirty pages.
-  size_t ndirty;
-
-  // Map of pages within chunk that keeps track of free/large/small.
-  arena_chunk_map_t map[1]; // Dynamically sized.
-};
-
 struct ArenaDirtyChunkTrait
 {
   static RedBlackTreeNode<arena_chunk_t>& GetTreeNode(arena_chunk_t* aThis)
   {
     return aThis->link_dirty;
   }
 
   static inline int Compare(arena_chunk_t* aNode, arena_chunk_t* aOther)