Bug 1403444 - Add methods to the RedBlackTree template class, replacing rb_wrap. r?njn (draft)
author     Mike Hommey <mh+mozilla@glandium.org>
date       Tue, 26 Sep 2017 15:06:00 +0900
changeset  670923 1a36d9db84195e54716692968d4e69d8ca29db75
parent     670922 b253031aac5e39ad9a61fec905ad58830b1884c5
child      670924 b5a37bce211143e98856cf252f8e52f414425977
push id    81767
push user  bmo:mh+mozilla@glandium.org
push date  Wed, 27 Sep 2017 06:43:40 +0000
reviewers  njn
bugs       1403444
milestone  58.0a1
Bug 1403444 - Add methods to the RedBlackTree template class, replacing rb_wrap. r?njn
memory/build/mozjemalloc.cpp
memory/build/rb.h
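
The gist of the change: each rb_wrap() invocation, which expanded into a family of
free functions (extent_tree_ad_new, extent_tree_ad_search, and so on), is replaced
by methods on a RedBlackTree<T, Trait> template, with the trait supplying the node
linkage (GetTreeNode) and the ordering (Compare). As a purely illustrative sketch
of a call site before and after, using the ExtentTreeTrait defined in this patch
(the sketch itself is not part of the diff below):

  // Before: free functions generated by
  //   rb_wrap(extent_tree_ad_, extent_tree_t, extent_node_t, link_ad, extent_ad_comp)
  extent_tree_ad_new(&huge);
  extent_node_t* node = extent_tree_ad_search(&huge, &key);

  // After: methods on the trait-parameterized template
  static RedBlackTree<extent_node_t, ExtentTreeTrait> huge;
  huge.Init();
  extent_node_t* node = huge.Search(&key);
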
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -590,17 +590,70 @@ struct extent_node_t {
 	void	*addr;
 
 	/* Total region size. */
 	size_t	size;
 
 	/* What type of chunk is there; used by chunk recycling code. */
 	ChunkType chunk_type;
 };
-typedef RedBlackTree<extent_node_t> extent_tree_t;
+
+template<typename T>
+int
+CompareAddr(T* a, T* b)
+{
+  uintptr_t a_addr = reinterpret_cast<uintptr_t>(a);
+  uintptr_t b_addr = reinterpret_cast<uintptr_t>(b);
+
+  return (a_addr > b_addr) - (a_addr < b_addr);
+}
+
+struct ExtentTreeSzTrait
+{
+  static RedBlackTreeNode<extent_node_t>& GetTreeNode(extent_node_t* aThis)
+  {
+    return aThis->link_szad;
+  }
+
+  static inline int Compare(extent_node_t* a, extent_node_t* b)
+  {
+    int ret = (a->size > b->size) - (a->size < b->size);
+    return ret ? ret : CompareAddr(a->addr, b->addr);
+  }
+};
+
+struct ExtentTreeTrait
+{
+  static RedBlackTreeNode<extent_node_t>& GetTreeNode(extent_node_t* aThis)
+  {
+    return aThis->link_ad;
+  }
+
+  static inline int Compare(extent_node_t* a, extent_node_t* b)
+  {
+    return CompareAddr(a->addr, b->addr);
+  }
+};
+
+struct ExtentTreeBoundsTrait : public ExtentTreeTrait
+{
+  static inline int Compare(extent_node_t* aKey, extent_node_t* aNode)
+  {
+    uintptr_t key_addr = reinterpret_cast<uintptr_t>(aKey->addr);
+    uintptr_t node_addr = reinterpret_cast<uintptr_t>(aNode->addr);
+    size_t node_size = aNode->size;
+
+    // Is aKey within aNode?
+    if (node_addr <= key_addr && key_addr < node_addr + node_size) {
+      return 0;
+    }
+
+    return (key_addr > node_addr) - (key_addr < node_addr);
+  }
+};
 
 /******************************************************************************/
 /*
  * Radix tree data structures.
  */
 
 /*
  * Size of each radix tree node (must be a power of 2).  This impacts tree
@@ -702,18 +755,49 @@ struct arena_chunk_map_t {
 #define	CHUNK_MAP_DECOMMITTED	((size_t)0x20U)
 #define	CHUNK_MAP_MADVISED_OR_DECOMMITTED (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
 #define	CHUNK_MAP_KEY		((size_t)0x10U)
 #define	CHUNK_MAP_DIRTY		((size_t)0x08U)
 #define	CHUNK_MAP_ZEROED	((size_t)0x04U)
 #define	CHUNK_MAP_LARGE		((size_t)0x02U)
 #define	CHUNK_MAP_ALLOCATED	((size_t)0x01U)
 };
-typedef RedBlackTree<arena_chunk_map_t> arena_avail_tree_t;
-typedef RedBlackTree<arena_chunk_map_t> arena_run_tree_t;
+struct ArenaChunkMapLink
+{
+  static RedBlackTreeNode<arena_chunk_map_t>& GetTreeNode(arena_chunk_map_t* aThis)
+  {
+    return aThis->link;
+  }
+};
+
+struct ArenaRunTreeTrait : public ArenaChunkMapLink
+{
+  static inline int Compare(arena_chunk_map_t* a, arena_chunk_map_t* b)
+  {
+    MOZ_ASSERT(a);
+    MOZ_ASSERT(b);
+    return CompareAddr(a, b);
+  }
+};
+
+struct ArenaAvailTreeTrait : public ArenaChunkMapLink
+{
+  static inline int Compare(arena_chunk_map_t* a, arena_chunk_map_t* b)
+  {
+    int ret;
+    size_t a_size = a->bits & ~pagesize_mask;
+    size_t b_size = b->bits & ~pagesize_mask;
+
+    ret = (a_size > b_size) - (a_size < b_size);
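+    // Treat keys (CHUNK_MAP_KEY) as though their address is lower than
+    // anything else; passing nullptr below compares the key as address 0.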
+    return ret ? ret : CompareAddr((a->bits & CHUNK_MAP_KEY) ? nullptr : a, b);
+  }
+};
+
+typedef RedBlackTree<arena_chunk_map_t, ArenaAvailTreeTrait> arena_avail_tree_t;
+typedef RedBlackTree<arena_chunk_map_t, ArenaRunTreeTrait> arena_run_tree_t;
 
 /* Arena chunk header. */
 struct arena_chunk_t {
 	/* Arena that owns the chunk. */
 	arena_t		*arena;
 
 	/* Linkage for the arena's tree of dirty chunks. */
 	RedBlackTreeNode<arena_chunk_t> link_dirty;
@@ -729,17 +813,33 @@ struct arena_chunk_t {
 #endif
 
 	/* Number of dirty pages. */
 	size_t		ndirty;
 
 	/* Map of pages within chunk that keeps track of free/large/small. */
 	arena_chunk_map_t map[1]; /* Dynamically sized. */
 };
-typedef RedBlackTree<arena_chunk_t> arena_chunk_tree_t;
+
+struct ArenaDirtyChunkTrait
+{
+  static RedBlackTreeNode<arena_chunk_t>& GetTreeNode(arena_chunk_t* aThis)
+  {
+    return aThis->link_dirty;
+  }
+
+  static inline int Compare(arena_chunk_t* a, arena_chunk_t* b)
+  {
+    MOZ_ASSERT(a);
+    MOZ_ASSERT(b);
+    return CompareAddr(a, b);
+  }
+};
+
+typedef RedBlackTree<arena_chunk_t, ArenaDirtyChunkTrait> arena_chunk_tree_t;
 
 #ifdef MALLOC_DOUBLE_PURGE
 namespace mozilla {
 
 template<>
 struct GetDoublyLinkedListElement<arena_chunk_t>
 {
   static DoublyLinkedListElement<arena_chunk_t>& Get(arena_chunk_t* aThis)
@@ -930,17 +1030,32 @@ public:
 
   bool RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize, size_t aOldSize);
 
   void Purge(bool aAll);
 
   void HardPurge();
 };
 
-typedef RedBlackTree<arena_t> arena_tree_t;
+struct ArenaTreeTrait
+{
+  static RedBlackTreeNode<arena_t>& GetTreeNode(arena_t* aThis)
+  {
+    return aThis->mLink;
+  }
+
+  static inline int Compare(arena_t* a, arena_t* b)
+  {
+    MOZ_ASSERT(a);
+    MOZ_ASSERT(b);
+    return (a->mId > b->mId) - (a->mId < b->mId);
+  }
+};
+
+typedef RedBlackTree<arena_t, ArenaTreeTrait> arena_tree_t;
 
 /********/
 /*
  * Chunks.
  */
 
 static malloc_rtree_t *chunk_rtree;
 
@@ -948,24 +1063,24 @@ static malloc_rtree_t *chunk_rtree;
 static malloc_mutex_t	chunks_mtx;
 
 /*
  * Trees of chunks that were previously allocated (trees differ only in node
  * ordering).  These are used when allocating chunks, in an attempt to re-use
  * address space.  Depending on function, different tree orderings are needed,
  * which is why there are two trees with the same contents.
  */
-static extent_tree_t	chunks_szad_mmap;
-static extent_tree_t	chunks_ad_mmap;
+static RedBlackTree<extent_node_t, ExtentTreeSzTrait> chunks_szad_mmap;
+static RedBlackTree<extent_node_t, ExtentTreeTrait> chunks_ad_mmap;
 
 /* Protects huge allocation-related data structures. */
 static malloc_mutex_t	huge_mtx;
 
 /* Tree of chunks that are stand-alone huge allocations. */
-static extent_tree_t	huge;
+static RedBlackTree<extent_node_t, ExtentTreeTrait> huge;
 
 /* Huge allocation statistics. */
 static uint64_t		huge_nmalloc;
 static uint64_t		huge_ndalloc;
 static size_t		huge_allocated;
 static size_t		huge_mapped;
 
 /****************************/
@@ -1478,84 +1593,16 @@ base_node_dealloc(extent_node_t *node)
 	malloc_mutex_unlock(&base_mtx);
 }
 
 /*
  * End Utility functions/macros.
  */
 /******************************************************************************/
 /*
- * Begin extent tree code.
- */
-
-static inline int
-extent_szad_comp(extent_node_t *a, extent_node_t *b)
-{
-	int ret;
-	size_t a_size = a->size;
-	size_t b_size = b->size;
-
-	ret = (a_size > b_size) - (a_size < b_size);
-	if (ret == 0) {
-		uintptr_t a_addr = (uintptr_t)a->addr;
-		uintptr_t b_addr = (uintptr_t)b->addr;
-
-		ret = (a_addr > b_addr) - (a_addr < b_addr);
-	}
-
-	return (ret);
-}
-
-/* Wrap red-black tree macros in functions. */
-rb_wrap(extent_tree_szad_, extent_tree_t, extent_node_t,
-    link_szad, extent_szad_comp)
-
-static inline int
-extent_ad_comp(extent_node_t *a, extent_node_t *b)
-{
-	uintptr_t a_addr = (uintptr_t)a->addr;
-	uintptr_t b_addr = (uintptr_t)b->addr;
-
-	return ((a_addr > b_addr) - (a_addr < b_addr));
-}
-
-/* Wrap red-black tree macros in functions. */
-rb_wrap(extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
-    extent_ad_comp)
-
-static inline int
-extent_bounds_comp(extent_node_t* aKey, extent_node_t* aNode)
-{
-  uintptr_t key_addr = (uintptr_t)aKey->addr;
-  uintptr_t node_addr = (uintptr_t)aNode->addr;
-  size_t node_size = aNode->size;
-
-  // Is aKey within aNode?
-  if (node_addr <= key_addr && key_addr < node_addr + node_size) {
-    return 0;
-  }
-
-  return ((key_addr > node_addr) - (key_addr < node_addr));
-}
-
-/*
- * This is an expansion of just the search function from the rb_wrap macro.
- */
-static extent_node_t *
-extent_tree_bounds_search(extent_tree_t *tree, extent_node_t *key) {
-    extent_node_t *ret;
-    rb_search(extent_node_t, link_ad, extent_bounds_comp, tree, key, ret);
-    return ret;
-}
-
-/*
- * End extent tree code.
- */
-/******************************************************************************/
-/*
  * Begin chunk management functions.
  */
 
 #ifdef XP_WIN
 
 static void *
 pages_map(void *addr, size_t size)
 {
@@ -1988,18 +2035,19 @@ pages_purge(void *addr, size_t length, b
 	return JEMALLOC_MADV_ZEROS && err == 0;
 #    undef JEMALLOC_MADV_PURGE
 #    undef JEMALLOC_MADV_ZEROS
 #  endif
 #endif
 }
 
 static void *
-chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
-    size_t alignment, bool base, bool *zeroed)
+chunk_recycle(RedBlackTree<extent_node_t, ExtentTreeSzTrait> *chunks_szad,
+              RedBlackTree<extent_node_t, ExtentTreeTrait> *chunks_ad,
+              size_t size, size_t alignment, bool base, bool *zeroed)
 {
 	void *ret;
 	extent_node_t *node;
 	extent_node_t key;
 	size_t alloc_size, leadsize, trailsize;
 	ChunkType chunk_type;
 
 	if (base) {
@@ -2014,38 +2062,38 @@ chunk_recycle(extent_tree_t *chunks_szad
 
 	alloc_size = size + alignment - chunksize;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return nullptr;
 	key.addr = nullptr;
 	key.size = alloc_size;
 	malloc_mutex_lock(&chunks_mtx);
-	node = extent_tree_szad_nsearch(chunks_szad, &key);
+	node = chunks_szad->SearchOrNext(&key);
 	if (!node) {
 		malloc_mutex_unlock(&chunks_mtx);
 		return nullptr;
 	}
 	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
 	    (uintptr_t)node->addr;
 	MOZ_ASSERT(node->size >= leadsize + size);
 	trailsize = node->size - leadsize - size;
 	ret = (void *)((uintptr_t)node->addr + leadsize);
 	chunk_type = node->chunk_type;
 	if (zeroed) {
 		*zeroed = (chunk_type == ZEROED_CHUNK);
 	}
 	/* Remove node from the tree. */
-	extent_tree_szad_remove(chunks_szad, node);
-	extent_tree_ad_remove(chunks_ad, node);
+	chunks_szad->Remove(node);
+	chunks_ad->Remove(node);
 	if (leadsize != 0) {
 		/* Insert the leading space as a smaller chunk. */
 		node->size = leadsize;
-		extent_tree_szad_insert(chunks_szad, node);
-		extent_tree_ad_insert(chunks_ad, node);
+		chunks_szad->Insert(node);
+		chunks_ad->Insert(node);
 		node = nullptr;
 	}
 	if (trailsize != 0) {
 		/* Insert the trailing space as a smaller chunk. */
 		if (!node) {
 			/*
 			 * An additional node is required, but
 			 * base_node_alloc() can cause a new base chunk to be
@@ -2059,18 +2107,18 @@ chunk_recycle(extent_tree_t *chunks_szad
 				chunk_dealloc(ret, size, chunk_type);
 				return nullptr;
 			}
 			malloc_mutex_lock(&chunks_mtx);
 		}
 		node->addr = (void *)((uintptr_t)(ret) + size);
 		node->size = trailsize;
 		node->chunk_type = chunk_type;
-		extent_tree_szad_insert(chunks_szad, node);
-		extent_tree_ad_insert(chunks_ad, node);
+		chunks_szad->Insert(node);
+		chunks_ad->Insert(node);
 		node = nullptr;
 	}
 
 	recycled_size -= size;
 
 	malloc_mutex_unlock(&chunks_mtx);
 
 	if (node)
@@ -2154,18 +2202,19 @@ chunk_ensure_zero(void* ptr, size_t size
 
 		for (i = 0; i < size / sizeof(size_t); i++)
 			MOZ_ASSERT(p[i] == 0);
 	}
 #endif
 }
 
 static void
-chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
-    size_t size, ChunkType chunk_type)
+chunk_record(RedBlackTree<extent_node_t, ExtentTreeSzTrait> *chunks_szad,
+             RedBlackTree<extent_node_t, ExtentTreeTrait> *chunks_ad,
+             void *chunk, size_t size, ChunkType chunk_type)
 {
 	extent_node_t *xnode, *node, *prev, *xprev, key;
 
 	if (chunk_type != ZEROED_CHUNK) {
 		if (pages_purge(chunk, size, chunk_type == HUGE_CHUNK)) {
 			chunk_type = ZEROED_CHUNK;
 		}
 	}
@@ -2177,70 +2226,70 @@ chunk_record(extent_tree_t *chunks_szad,
 	 * held.
 	 */
 	xnode = base_node_alloc();
 	/* Use xprev to implement conditional deferred deallocation of prev. */
 	xprev = nullptr;
 
 	malloc_mutex_lock(&chunks_mtx);
 	key.addr = (void *)((uintptr_t)chunk + size);
-	node = extent_tree_ad_nsearch(chunks_ad, &key);
+	node = chunks_ad->SearchOrNext(&key);
 	/* Try to coalesce forward. */
 	if (node && node->addr == key.addr) {
 		/*
 		 * Coalesce chunk with the following address range.  This does
 		 * not change the position within chunks_ad, so only
 		 * remove/insert from/into chunks_szad.
 		 */
-		extent_tree_szad_remove(chunks_szad, node);
+		chunks_szad->Remove(node);
 		node->addr = chunk;
 		node->size += size;
 		if (node->chunk_type != chunk_type) {
 			node->chunk_type = RECYCLED_CHUNK;
 		}
-		extent_tree_szad_insert(chunks_szad, node);
+		chunks_szad->Insert(node);
 	} else {
 		/* Coalescing forward failed, so insert a new node. */
 		if (!xnode) {
 			/*
 			 * base_node_alloc() failed, which is an exceedingly
 			 * unlikely failure.  Leak chunk; its pages have
 			 * already been purged, so this is only a virtual
 			 * memory leak.
 			 */
 			goto label_return;
 		}
 		node = xnode;
 		xnode = nullptr; /* Prevent deallocation below. */
 		node->addr = chunk;
 		node->size = size;
 		node->chunk_type = chunk_type;
-		extent_tree_ad_insert(chunks_ad, node);
-		extent_tree_szad_insert(chunks_szad, node);
+		chunks_ad->Insert(node);
+		chunks_szad->Insert(node);
 	}
 
 	/* Try to coalesce backward. */
-	prev = extent_tree_ad_prev(chunks_ad, node);
+	prev = chunks_ad->Prev(node);
 	if (prev && (void *)((uintptr_t)prev->addr + prev->size) ==
 	    chunk) {
 		/*
 		 * Coalesce chunk with the previous address range.  This does
 		 * not change the position within chunks_ad, so only
 		 * remove/insert node from/into chunks_szad.
 		 */
-		extent_tree_szad_remove(chunks_szad, prev);
-		extent_tree_ad_remove(chunks_ad, prev);
-
-		extent_tree_szad_remove(chunks_szad, node);
+		chunks_szad->Remove(prev);
+		chunks_ad->Remove(prev);
+
+		chunks_szad->Remove(node);
 		node->addr = prev->addr;
 		node->size += prev->size;
 		if (node->chunk_type != prev->chunk_type) {
 			node->chunk_type = RECYCLED_CHUNK;
 		}
-		extent_tree_szad_insert(chunks_szad, node);
+		chunks_szad->Insert(node);
 
 		xprev = prev;
 	}
 
 	recycled_size += size;
 
 label_return:
 	malloc_mutex_unlock(&chunks_mtx);
@@ -2350,92 +2399,16 @@ choose_arena(size_t size)
   }
 #else
   ret = arenas[0];
 #endif
   MOZ_DIAGNOSTIC_ASSERT(ret);
   return (ret);
 }
 
-static inline int
-arena_comp(arena_t* a, arena_t* b)
-{
-  MOZ_ASSERT(a);
-  MOZ_ASSERT(b);
-
-  return (a->mId > b->mId) - (a->mId < b->mId);
-}
-
-/* Wrap red-black tree macros in functions. */
-rb_wrap(arena_tree_, arena_tree_t, arena_t, mLink, arena_comp)
-
-static inline int
-arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
-{
-	uintptr_t a_chunk = (uintptr_t)a;
-	uintptr_t b_chunk = (uintptr_t)b;
-
-	MOZ_ASSERT(a);
-	MOZ_ASSERT(b);
-
-	return ((a_chunk > b_chunk) - (a_chunk < b_chunk));
-}
-
-/* Wrap red-black tree macros in functions. */
-rb_wrap(arena_chunk_tree_dirty_, arena_chunk_tree_t,
-    arena_chunk_t, link_dirty, arena_chunk_comp)
-
-static inline int
-arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
-{
-	uintptr_t a_mapelm = (uintptr_t)a;
-	uintptr_t b_mapelm = (uintptr_t)b;
-
-	MOZ_ASSERT(a);
-	MOZ_ASSERT(b);
-
-	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
-}
-
-/* Wrap red-black tree macros in functions. */
-rb_wrap(arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, link,
-    arena_run_comp)
-
-static inline int
-arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
-{
-	int ret;
-	size_t a_size = a->bits & ~pagesize_mask;
-	size_t b_size = b->bits & ~pagesize_mask;
-
-	ret = (a_size > b_size) - (a_size < b_size);
-	if (ret == 0) {
-		uintptr_t a_mapelm, b_mapelm;
-
-		if ((a->bits & CHUNK_MAP_KEY) == 0)
-			a_mapelm = (uintptr_t)a;
-		else {
-			/*
-			 * Treat keys as though they are lower than anything
-			 * else.
-			 */
-			a_mapelm = 0;
-		}
-		b_mapelm = (uintptr_t)b;
-
-		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
-	}
-
-	return (ret);
-}
-
-/* Wrap red-black tree macros in functions. */
-rb_wrap(arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, link,
-    arena_avail_comp)
-
 static inline void *
 arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
 {
 	void *ret;
 	unsigned i, mask, bit, regind;
 
 	MOZ_ASSERT(run->magic == ARENA_RUN_MAGIC);
 	MOZ_ASSERT(run->regs_minelm < bin->regs_mask_nelms);
@@ -2604,27 +2577,27 @@ arena_t::SplitRun(arena_run_t* aRun, siz
   old_ndirty = chunk->ndirty;
   run_ind = (unsigned)((uintptr_t(aRun) - uintptr_t(chunk)) >> pagesize_2pow);
   total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >> pagesize_2pow;
   need_pages = (aSize >> pagesize_2pow);
   MOZ_ASSERT(need_pages > 0);
   MOZ_ASSERT(need_pages <= total_pages);
   rem_pages = total_pages - need_pages;
 
-  arena_avail_tree_remove(&mRunsAvail, &chunk->map[run_ind]);
+  mRunsAvail.Remove(&chunk->map[run_ind]);
 
   /* Keep track of trailing unused pages for later use. */
   if (rem_pages > 0) {
     chunk->map[run_ind+need_pages].bits = (rem_pages <<
         pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
         pagesize_mask);
     chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
         pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
         pagesize_mask);
-    arena_avail_tree_insert(&mRunsAvail, &chunk->map[run_ind+need_pages]);
+    mRunsAvail.Insert(&chunk->map[run_ind+need_pages]);
   }
 
   for (i = 0; i < need_pages; i++) {
     /*
      * Commit decommitted pages if necessary.  If a decommitted
      * page is encountered, commit all needed adjacent decommitted
      * pages in one operation, in order to reduce system call
      * overhead.
@@ -2689,17 +2662,17 @@ arena_t::SplitRun(arena_run_t* aRun, siz
    * pages only matters if the application tries to operate on an
    * interior pointer.
    */
   if (aLarge) {
     chunk->map[run_ind].bits |= aSize;
   }
 
   if (chunk->ndirty == 0 && old_ndirty > 0) {
-    arena_chunk_tree_dirty_remove(&mChunksDirty, chunk);
+    mChunksDirty.Remove(chunk);
   }
 }
 
 void
 arena_t::InitChunk(arena_chunk_t* aChunk, bool aZeroed)
 {
   size_t i;
   /* WARNING: The following relies on !aZeroed meaning "used to be an arena
@@ -2744,30 +2717,29 @@ arena_t::InitChunk(arena_chunk_t* aChunk
    * Start out decommitted, in order to force a closer correspondence
    * between dirty pages and committed untouched pages.
    */
   pages_decommit(run, arena_maxclass);
 #endif
   mStats.committed += arena_chunk_header_npages;
 
   /* Insert the run into the tree of available runs. */
-  arena_avail_tree_insert(&mRunsAvail,
-      &aChunk->map[arena_chunk_header_npages]);
+  mRunsAvail.Insert(&aChunk->map[arena_chunk_header_npages]);
 
 #ifdef MALLOC_DOUBLE_PURGE
   new (&aChunk->chunks_madvised_elem) mozilla::DoublyLinkedListElement<arena_chunk_t>();
 #endif
 }
 
 void
 arena_t::DeallocChunk(arena_chunk_t* aChunk)
 {
   if (mSpare) {
     if (mSpare->ndirty > 0) {
-      arena_chunk_tree_dirty_remove(&aChunk->arena->mChunksDirty, mSpare);
+      aChunk->arena->mChunksDirty.Remove(mSpare);
       mNumDirty -= mSpare->ndirty;
       mStats.committed -= mSpare->ndirty;
     }
 
 #ifdef MALLOC_DOUBLE_PURGE
     if (mChunksMAdvised.ElementProbablyInList(mSpare)) {
       mChunksMAdvised.remove(mSpare);
     }
@@ -2778,34 +2750,34 @@ arena_t::DeallocChunk(arena_chunk_t* aCh
     mStats.committed -= arena_chunk_header_npages;
   }
 
   /*
    * Remove run from the tree of available runs, so that the arena does not use it.
    * Dirty page flushing only uses the tree of dirty chunks, so leaving this
    * chunk in the chunks_* trees is sufficient for that purpose.
    */
-  arena_avail_tree_remove(&mRunsAvail, &aChunk->map[arena_chunk_header_npages]);
+  mRunsAvail.Remove(&aChunk->map[arena_chunk_header_npages]);
 
   mSpare = aChunk;
 }
 
 arena_run_t*
 arena_t::AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero)
 {
   arena_run_t* run;
   arena_chunk_map_t* mapelm;
   arena_chunk_map_t key;
 
   MOZ_ASSERT(aSize <= arena_maxclass);
   MOZ_ASSERT((aSize & pagesize_mask) == 0);
 
   /* Search the arena's chunks for the lowest best fit. */
   key.bits = aSize | CHUNK_MAP_KEY;
-  mapelm = arena_avail_tree_nsearch(&mRunsAvail, &key);
+  mapelm = mRunsAvail.SearchOrNext(&key);
   if (mapelm) {
     arena_chunk_t* chunk =
         (arena_chunk_t*)CHUNK_ADDR2BASE(mapelm);
     size_t pageind = (uintptr_t(mapelm) - uintptr_t(chunk->map)) /
         sizeof(arena_chunk_map_t);
 
     run = (arena_run_t*)(uintptr_t(chunk) + (pageind << pagesize_2pow));
     SplitRun(run, aSize, aLarge, aZero);
@@ -2813,17 +2785,17 @@ arena_t::AllocRun(arena_bin_t* aBin, siz
   }
 
   if (mSpare) {
     /* Use the spare. */
     arena_chunk_t* chunk = mSpare;
     mSpare = nullptr;
     run = (arena_run_t*)(uintptr_t(chunk) + (arena_chunk_header_npages << pagesize_2pow));
     /* Insert the run into the tree of available runs. */
-    arena_avail_tree_insert(&mRunsAvail, &chunk->map[arena_chunk_header_npages]);
+    mRunsAvail.Insert(&chunk->map[arena_chunk_header_npages]);
     SplitRun(run, aSize, aLarge, aZero);
     return run;
   }
 
   /*
    * No usable runs.  Create a new chunk from which to allocate
    * the run.
    */
@@ -2847,34 +2819,34 @@ void
 arena_t::Purge(bool aAll)
 {
   arena_chunk_t* chunk;
   size_t i, npages;
   /* If all is set purge all dirty pages. */
   size_t dirty_max = aAll ? 1 : mMaxDirty;
 #ifdef MOZ_DEBUG
   size_t ndirty = 0;
-  rb_foreach_begin(arena_chunk_t, link_dirty, &mChunksDirty, chunk) {
+  rb_foreach_begin(arena_chunk_t, ArenaDirtyChunkTrait::GetTreeNode, &mChunksDirty, chunk) {
     ndirty += chunk->ndirty;
-  } rb_foreach_end(arena_chunk_t, link_dirty, &mChunksDirty, chunk)
+  } rb_foreach_end(arena_chunk_t, ArenaDirtyChunkTrait::GetTreeNode, &mChunksDirty, chunk)
   MOZ_ASSERT(ndirty == mNumDirty);
 #endif
   MOZ_DIAGNOSTIC_ASSERT(aAll || (mNumDirty > mMaxDirty));
 
   /*
    * Iterate downward through chunks until enough dirty memory has been
    * purged.  Terminate as soon as possible in order to minimize the
    * number of system calls, even if a chunk has only been partially
    * purged.
    */
   while (mNumDirty > (dirty_max >> 1)) {
 #ifdef MALLOC_DOUBLE_PURGE
     bool madvised = false;
 #endif
-    chunk = arena_chunk_tree_dirty_last(&mChunksDirty);
+    chunk = mChunksDirty.Last();
     MOZ_DIAGNOSTIC_ASSERT(chunk);
 
     for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
       MOZ_DIAGNOSTIC_ASSERT(i >= arena_chunk_header_npages);
 
       if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
 #ifdef MALLOC_DECOMMIT
         const size_t free_operation = CHUNK_MAP_DECOMMITTED;
@@ -2912,17 +2884,17 @@ arena_t::Purge(bool aAll)
 #endif
         if (mNumDirty <= (dirty_max >> 1)) {
           break;
         }
       }
     }
 
     if (chunk->ndirty == 0) {
-      arena_chunk_tree_dirty_remove(&mChunksDirty, chunk);
+      mChunksDirty.Remove(chunk);
     }
 #ifdef MALLOC_DOUBLE_PURGE
     if (madvised) {
       /* The chunk might already be in the list, but this
        * makes sure it's at the front. */
       if (mChunksMAdvised.ElementProbablyInList(chunk)) {
         mChunksMAdvised.remove(chunk);
       }
@@ -2954,18 +2926,17 @@ arena_t::DallocRun(arena_run_t* aRun, bo
 
     for (i = 0; i < run_pages; i++) {
       MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
           == 0);
       chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
     }
 
     if (chunk->ndirty == 0) {
-      arena_chunk_tree_dirty_insert(&mChunksDirty,
-          chunk);
+      mChunksDirty.Insert(chunk);
     }
     chunk->ndirty += run_pages;
     mNumDirty += run_pages;
   } else {
     size_t i;
 
     for (i = 0; i < run_pages; i++) {
       chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE |
@@ -2982,18 +2953,17 @@ arena_t::DallocRun(arena_run_t* aRun, bo
       (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
     size_t nrun_size = chunk->map[run_ind+run_pages].bits &
         ~pagesize_mask;
 
     /*
      * Remove successor from tree of available runs; the coalesced run is
      * inserted later.
      */
-    arena_avail_tree_remove(&mRunsAvail,
-        &chunk->map[run_ind+run_pages]);
+    mRunsAvail.Remove(&chunk->map[run_ind+run_pages]);
 
     size += nrun_size;
     run_pages = size >> pagesize_2pow;
 
     MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
         == nrun_size);
     chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
         pagesize_mask);
@@ -3007,31 +2977,31 @@ arena_t::DallocRun(arena_run_t* aRun, bo
     size_t prun_size = chunk->map[run_ind-1].bits & ~pagesize_mask;
 
     run_ind -= prun_size >> pagesize_2pow;
 
     /*
      * Remove predecessor from tree of available runs; the coalesced run is
      * inserted later.
      */
-    arena_avail_tree_remove(&mRunsAvail, &chunk->map[run_ind]);
+    mRunsAvail.Remove(&chunk->map[run_ind]);
 
     size += prun_size;
     run_pages = size >> pagesize_2pow;
 
     MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
         prun_size);
     chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
         pagesize_mask);
     chunk->map[run_ind+run_pages-1].bits = size |
         (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
   }
 
   /* Insert into tree of available runs, now that coalescing is complete. */
-  arena_avail_tree_insert(&mRunsAvail, &chunk->map[run_ind]);
+  mRunsAvail.Insert(&chunk->map[run_ind]);
 
   /* Deallocate chunk if it is now completely unused. */
   if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
       CHUNK_MAP_ALLOCATED)) == arena_maxclass) {
     DeallocChunk(chunk);
   }
 
   /* Enforce mMaxDirty. */
@@ -3085,20 +3055,20 @@ arena_t::TrimRunTail(arena_chunk_t* aChu
 arena_run_t*
 arena_t::GetNonFullBinRun(arena_bin_t* aBin)
 {
   arena_chunk_map_t* mapelm;
   arena_run_t* run;
   unsigned i, remainder;
 
   /* Look for a usable run. */
-  mapelm = arena_run_tree_first(&aBin->runs);
+  mapelm = aBin->runs.First();
   if (mapelm) {
     /* run is guaranteed to have available space. */
-    arena_run_tree_remove(&aBin->runs, mapelm);
+    aBin->runs.Remove(mapelm);
     run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
     return run;
   }
   /* No existing runs have any space available. */
 
   /* Allocate a new run. */
   run = AllocRun(aBin, aBin->run_size, false, false);
   if (!run)
@@ -3564,17 +3534,17 @@ isalloc_validate(const void* ptr)
   } else {
     size_t ret;
     extent_node_t* node;
     extent_node_t key;
 
     /* Chunk. */
     key.addr = (void*)chunk;
     malloc_mutex_lock(&huge_mtx);
-    node = extent_tree_ad_search(&huge, &key);
+    node = huge.Search(&key);
     if (node)
       ret = node->size;
     else
       ret = 0;
     malloc_mutex_unlock(&huge_mtx);
     return ret;
   }
 }
@@ -3597,17 +3567,17 @@ isalloc(const void *ptr)
 		extent_node_t *node, key;
 
 		/* Chunk (huge allocation). */
 
 		malloc_mutex_lock(&huge_mtx);
 
 		/* Extract from tree of huge allocations. */
 		key.addr = const_cast<void*>(ptr);
-		node = extent_tree_ad_search(&huge, &key);
+		node = huge.Search(&key);
 		MOZ_DIAGNOSTIC_ASSERT(node);
 
 		ret = node->size;
 
 		malloc_mutex_unlock(&huge_mtx);
 	}
 
 	return (ret);
@@ -3626,17 +3596,18 @@ MozJemalloc::jemalloc_ptr_info(const voi
 
   // Look for huge allocations before looking for |chunk| in chunk_rtree.
   // This is necessary because |chunk| won't be in chunk_rtree if it's
   // the second or subsequent chunk in a huge allocation.
   extent_node_t* node;
   extent_node_t key;
   malloc_mutex_lock(&huge_mtx);
   key.addr = const_cast<void*>(aPtr);
-  node = extent_tree_bounds_search(&huge, &key);
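+  // |huge| is keyed by ExtentTreeTrait, but ExtentTreeBoundsTrait inherits its
+  // GetTreeNode (same link_ad linkage) and only swaps the Compare function,
+  // and the RedBlackTree layout does not depend on the trait, so the tree can
+  // be reinterpreted here to search for the node whose bounds contain aPtr.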
+  node = reinterpret_cast<
+    RedBlackTree<extent_node_t, ExtentTreeBoundsTrait>*>(&huge)->Search(&key);
   if (node) {
     *aInfo = { TagLiveHuge, node->addr, node->size };
   }
   malloc_mutex_unlock(&huge_mtx);
   if (node) {
     return;
   }
 
@@ -3765,18 +3736,18 @@ arena_t::DallocSmall(arena_chunk_t* aChu
     } else if (bin->nregs != 1) {
       size_t run_pageind = (uintptr_t(run) - uintptr_t(aChunk)) >> pagesize_2pow;
       arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];
       /*
        * This block's conditional is necessary because if the
        * run only contains one region, then it never gets
        * inserted into the non-full runs tree.
        */
-      MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) == run_mapelm);
-      arena_run_tree_remove(&bin->runs, run_mapelm);
+      MOZ_DIAGNOSTIC_ASSERT(bin->runs.Search(run_mapelm) == run_mapelm);
+      bin->runs.Remove(run_mapelm);
     }
 #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
     run->magic = 0;
 #endif
     DallocRun(run, true);
     bin->stats.curruns--;
   } else if (run->nfree == 1 && run != bin->runcur) {
     /*
@@ -3788,26 +3759,26 @@ arena_t::DallocSmall(arena_chunk_t* aChu
     } else if (uintptr_t(run) < uintptr_t(bin->runcur)) {
       /* Switch runcur. */
       if (bin->runcur->nfree > 0) {
         arena_chunk_t* runcur_chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(bin->runcur);
         size_t runcur_pageind = (uintptr_t(bin->runcur) - uintptr_t(runcur_chunk)) >> pagesize_2pow;
         arena_chunk_map_t* runcur_mapelm = &runcur_chunk->map[runcur_pageind];
 
         /* Insert runcur. */
-        MOZ_DIAGNOSTIC_ASSERT(!arena_run_tree_search(&bin->runs, runcur_mapelm));
-        arena_run_tree_insert(&bin->runs, runcur_mapelm);
+        MOZ_DIAGNOSTIC_ASSERT(!bin->runs.Search(runcur_mapelm));
+        bin->runs.Insert(runcur_mapelm);
       }
       bin->runcur = run;
     } else {
       size_t run_pageind = (uintptr_t(run) - uintptr_t(aChunk)) >> pagesize_2pow;
       arena_chunk_map_t *run_mapelm = &aChunk->map[run_pageind];
 
-      MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) == nullptr);
-      arena_run_tree_insert(&bin->runs, run_mapelm);
+      MOZ_DIAGNOSTIC_ASSERT(bin->runs.Search(run_mapelm) == nullptr);
+      bin->runs.Insert(run_mapelm);
     }
   }
   mStats.allocated_small -= size;
 }
 
 void
 arena_t::DallocLarge(arena_chunk_t* aChunk, void* aPtr)
 {
@@ -4047,63 +4018,63 @@ arena_t::Init()
 
   if (malloc_spin_init(&mLock))
     return true;
 
   memset(&mLink, 0, sizeof(mLink));
   memset(&mStats, 0, sizeof(arena_stats_t));
 
   /* Initialize chunks. */
-  arena_chunk_tree_dirty_new(&mChunksDirty);
+  mChunksDirty.Init();
 #ifdef MALLOC_DOUBLE_PURGE
   new (&mChunksMAdvised) mozilla::DoublyLinkedList<arena_chunk_t>();
 #endif
   mSpare = nullptr;
 
   mNumDirty = 0;
   // Reduce the maximum amount of dirty pages we allow to be kept on
   // thread local arenas. TODO: make this more flexible.
   mMaxDirty = opt_dirty_max >> 3;
 
-  arena_avail_tree_new(&mRunsAvail);
+  mRunsAvail.Init();
 
   /* Initialize bins. */
   prev_run_size = pagesize;
 
   /* (2^n)-spaced tiny bins. */
   for (i = 0; i < ntbins; i++) {
     bin = &mBins[i];
     bin->runcur = nullptr;
-    arena_run_tree_new(&bin->runs);
+    bin->runs.Init();
 
     bin->reg_size = (1ULL << (TINY_MIN_2POW + i));
 
     prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
     memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
   }
 
   /* Quantum-spaced bins. */
   for (; i < ntbins + nqbins; i++) {
     bin = &mBins[i];
     bin->runcur = nullptr;
-    arena_run_tree_new(&bin->runs);
+    bin->runs.Init();
 
     bin->reg_size = quantum * (i - ntbins + 1);
 
     prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
     memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
   }
 
   /* (2^n)-spaced sub-page bins. */
   for (; i < ntbins + nqbins + nsbins; i++) {
     bin = &mBins[i];
     bin->runcur = nullptr;
-    arena_run_tree_new(&bin->runs);
+    bin->runs.Init();
 
     bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
 
     prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
     memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
   }
 
@@ -4149,17 +4120,17 @@ arenas_extend()
   if (!ret || ret->Init()) {
     return arenas_fallback();
   }
 
   malloc_spin_lock(&arenas_lock);
 
   // TODO: Use random Ids.
   ret->mId = narenas;
-  arena_tree_insert(&gArenaTree, ret);
+  gArenaTree.Insert(ret);
 
   /* Allocate and initialize arenas. */
   if (narenas % arenas_growth == 0) {
     size_t max_arenas = ((narenas + arenas_growth) / arenas_growth) * arenas_growth;
     /*
     * We're unfortunately leaking the previous allocation;
     * the base allocator doesn't know how to free things.
      */
@@ -4229,17 +4200,17 @@ huge_palloc(size_t size, size_t alignmen
 	}
 
 	/* Insert node into huge. */
 	node->addr = ret;
 	psize = PAGE_CEILING(size);
 	node->size = psize;
 
 	malloc_mutex_lock(&huge_mtx);
-	extent_tree_ad_insert(&huge, node);
+	huge.Insert(node);
 	huge_nmalloc++;
 
         /* Although we allocated space for csize bytes, we indicate that we've
          * allocated only psize bytes.
          *
          * If DECOMMIT is defined, this is a reasonable thing to do, since
          * we'll explicitly decommit the bytes in excess of psize.
          *
@@ -4302,17 +4273,17 @@ huge_ralloc(void *ptr, size_t size, size
 			extent_node_t *node, key;
 
 			pages_decommit((void *)((uintptr_t)ptr + psize),
 			    oldsize - psize);
 
 			/* Update recorded size. */
 			malloc_mutex_lock(&huge_mtx);
 			key.addr = const_cast<void*>(ptr);
-			node = extent_tree_ad_search(&huge, &key);
+			node = huge.Search(&key);
 			MOZ_ASSERT(node);
 			MOZ_ASSERT(node->size == oldsize);
 			huge_allocated -= oldsize - psize;
 			/* No need to change huge_mapped, because we didn't
 			 * (un)map anything. */
 			node->size = psize;
 			malloc_mutex_unlock(&huge_mtx);
 		} else if (psize > oldsize) {
@@ -4327,17 +4298,17 @@ huge_ralloc(void *ptr, size_t size, size
                  * so malloc_usable_size doesn't return a value smaller than
                  * what was requested via realloc(). */
 
                 if (psize > oldsize) {
                         /* Update recorded size. */
                         extent_node_t *node, key;
                         malloc_mutex_lock(&huge_mtx);
                         key.addr = const_cast<void*>(ptr);
-                        node = extent_tree_ad_search(&huge, &key);
+                        node = huge.Search(&key);
                         MOZ_ASSERT(node);
                         MOZ_ASSERT(node->size == oldsize);
                         huge_allocated += psize - oldsize;
 			/* No need to change huge_mapped, because we didn't
 			 * (un)map anything. */
                         node->size = psize;
                         malloc_mutex_unlock(&huge_mtx);
                 }
@@ -4373,20 +4344,20 @@ static void
 huge_dalloc(void *ptr)
 {
 	extent_node_t *node, key;
 
 	malloc_mutex_lock(&huge_mtx);
 
 	/* Extract from tree of huge allocations. */
 	key.addr = ptr;
-	node = extent_tree_ad_search(&huge, &key);
+	node = huge.Search(&key);
 	MOZ_ASSERT(node);
 	MOZ_ASSERT(node->addr == ptr);
-	extent_tree_ad_remove(&huge, node);
+	huge.Remove(node);
 
 	huge_ndalloc++;
 	huge_allocated -= node->size;
 	huge_mapped -= CHUNK_CEILING(node->size);
 
 	malloc_mutex_unlock(&huge_mtx);
 
 	/* Unmap chunk. */
@@ -4621,39 +4592,39 @@ MALLOC_OUT:
   /* Various sanity checks that regard configuration. */
   MOZ_ASSERT(quantum >= sizeof(void *));
   MOZ_ASSERT(quantum <= pagesize);
   MOZ_ASSERT(chunksize >= pagesize);
   MOZ_ASSERT(quantum * 4 <= chunksize);
 
   /* Initialize chunks data. */
   malloc_mutex_init(&chunks_mtx);
-  extent_tree_szad_new(&chunks_szad_mmap);
-  extent_tree_ad_new(&chunks_ad_mmap);
+  chunks_szad_mmap.Init();
+  chunks_ad_mmap.Init();
 
   /* Initialize huge allocation data. */
   malloc_mutex_init(&huge_mtx);
-  extent_tree_ad_new(&huge);
+  huge.Init();
   huge_nmalloc = 0;
   huge_ndalloc = 0;
   huge_allocated = 0;
   huge_mapped = 0;
 
   /* Initialize base allocation data structures. */
   base_mapped = 0;
   base_committed = 0;
   base_nodes = nullptr;
   malloc_mutex_init(&base_mtx);
 
   malloc_spin_init(&arenas_lock);
 
   /*
    * Initialize one arena here.
    */
-  arena_tree_new(&gArenaTree);
+  gArenaTree.Init();
   arenas_extend();
   if (!arenas || !arenas[0]) {
 #ifndef XP_WIN
     malloc_mutex_unlock(&init_lock);
 #endif
     return true;
   }
   /* arena_t::Init() sets this to a lower value for thread local arenas;
@@ -5037,20 +5008,20 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
                       arena->mStats.allocated_large;
 
     arena_dirty = arena->mNumDirty << pagesize_2pow;
 
     for (j = 0; j < ntbins + nqbins + nsbins; j++) {
       arena_bin_t* bin = &arena->mBins[j];
       size_t bin_unused = 0;
 
-      rb_foreach_begin(arena_chunk_map_t, link, &bin->runs, mapelm) {
+      rb_foreach_begin(arena_chunk_map_t, ArenaChunkMapLink::GetTreeNode, &bin->runs, mapelm) {
         run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
         bin_unused += run->nfree * bin->reg_size;
-      } rb_foreach_end(arena_chunk_map_t, link, &bin->runs, mapelm)
+      } rb_foreach_end(arena_chunk_map_t, ArenaChunkMapLink::GetTreeNode, &bin->runs, mapelm)
 
       if (bin->runcur) {
         bin_unused += bin->runcur->nfree * bin->reg_size;
       }
 
       arena_unused += bin_unused;
       arena_headers += bin->stats.curruns * bin->reg0_offset;
     }
@@ -5173,17 +5144,17 @@ MozJemalloc::jemalloc_free_dirty_pages(v
 }
 
 inline arena_t*
 arena_t::GetById(arena_id_t aArenaId)
 {
   arena_t key;
   key.mId = aArenaId;
   malloc_spin_lock(&arenas_lock);
-  arena_t* result = arena_tree_search(&gArenaTree, &key);
+  arena_t* result = gArenaTree.Search(&key);
   malloc_spin_unlock(&arenas_lock);
   MOZ_RELEASE_ASSERT(result);
   return result;
 }
 
 #ifdef NIGHTLY_BUILD
 template<> inline arena_id_t
 MozJemalloc::moz_create_arena()
@@ -5192,17 +5163,17 @@ MozJemalloc::moz_create_arena()
   return arena->mId;
 }
 
 template<> inline void
 MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
 {
   arena_t* arena = arena_t::GetById(aArenaId);
   malloc_spin_lock(&arenas_lock);
-  arena_tree_remove(&gArenaTree, arena);
+  gArenaTree.Remove(arena);
  // The arena is leaked, and remaining allocations in it are still alive
  // until they are freed. After that, the arena will be empty but still
  // have at least a chunk taking up address space. TODO: bug 1364359.
   malloc_spin_unlock(&arenas_lock);
 }
 
 #define MALLOC_DECL(name, return_type, ...) \
   template<> inline return_type \
--- a/memory/build/rb.h
+++ b/memory/build/rb.h
@@ -31,44 +31,47 @@
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  ******************************************************************************
  *
- * cpp macro implementation of left-leaning red-black trees.
+ * C++ template implementation of left-leaning red-black trees.
  *
  * Usage:
  *
  *   #define SIZEOF_PTR ...
  *   #define SIZEOF_PTR_2POW ...
  *
  *   #include <rb.h>
  *   ...
  *
  * All operations are done non-recursively.  Parent pointers are not used, and
  * color bits are stored in the least significant bit of right-child pointers,
  * thus making node linkage as compact as is possible for red-black trees.
  *
- * Some macros use a comparison function pointer, which is expected to have the
- * following prototype:
- *
- *   int (a_cmp *)(a_type *a_node, a_type *a_other);
- *                         ^^^^^^
- *                      or a_key
+ * The RedBlackTree template expects two type arguments: the type of the nodes,
+ * which must contain a RedBlackTreeNode member, and a trait providing two
+ * static methods:
+ *  - a GetTreeNode method that returns a reference to the RedBlackTreeNode
+ *    embedded in a given node, with the following signature:
+ *      static RedBlackTreeNode<T>& GetTreeNode(T*)
+ *  - a Compare function with the following signature:
+ *      static int Compare(T* aNode, T* aOther)
+ *                            ^^^^^
+ *                         or aKey
  *
 * Interpretation of comparison function return values:
  *
- *   -1 : a_node <  a_other
- *    0 : a_node == a_other
- *    1 : a_node >  a_other
+ *   -1 : aNode <  aOther
+ *    0 : aNode == aOther
+ *    1 : aNode >  aOther
  *
- * In all cases, the a_node or a_key macro argument is the first argument to the
+ * In all cases, the aNode or aKey argument is the first argument to the
  * comparison function, which makes it possible to write comparison functions
  * that treat the first argument specially.
  *
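+ * For example, a node type and its trait could look like the following
+ * (a purely illustrative sketch; Int and IntTrait are hypothetical names that
+ * are not defined in this file):
+ *
+ *   struct Int
+ *   {
+ *     int mValue;
+ *     RedBlackTreeNode<Int> mNode;
+ *   };
+ *
+ *   struct IntTrait
+ *   {
+ *     static RedBlackTreeNode<Int>& GetTreeNode(Int* aThis)
+ *     {
+ *       return aThis->mNode;
+ *     }
+ *
+ *     static int Compare(Int* a, Int* b)
+ *     {
+ *       return (a->mValue > b->mValue) - (a->mValue < b->mValue);
+ *     }
+ *   };
+ *
+ *   RedBlackTree<Int, IntTrait> tree;
+ *   tree.Init();
+ *   Int node;
+ *   node.mValue = 42;
+ *   tree.Insert(&node);
+ *   Int key;
+ *   key.mValue = 42;
+ *   Int* match = tree.Search(&key); // == &node
+ *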
  ******************************************************************************/
 
 #ifndef RB_H_
 #define RB_H_
 
@@ -122,25 +125,17 @@ public:
   }
 
   void SetColor(NodeColor aColor)
   {
     mRightAndColor = (mRightAndColor & uintptr_t(~1)) | aColor;
   }
 };
 
-/* Root structure. */
-template <typename T>
-struct RedBlackTree
-{
-  T* rbt_root;
-  T rbt_nil;
-};
-
-#define rb_node_field(a_node, a_field) (a_node)->a_field
+#define rb_node_field(a_node, a_field) a_field(a_node)
 
 /* Node initializer. */
 #define rbp_node_new(a_type, a_field, a_tree, a_node)                          \
   do {                                                                         \
     rb_node_field((a_node), a_field).SetLeft(&(a_tree)->rbt_nil);              \
     rb_node_field((a_node), a_field).SetRight(&(a_tree)->rbt_nil);             \
     rb_node_field((a_node), a_field).SetColor(NodeColor::Red);                 \
   } while (0)
@@ -660,74 +655,83 @@ struct RedBlackTree
           }                                                                    \
         }                                                                      \
       }                                                                        \
     }                                                                          \
     /* Update root.                                                   */       \
     (a_tree)->rbt_root = rb_node_field(&rbp_r_s, a_field).Left();              \
   } while (0)
 
-/*
- * The rb_wrap() macro provides a convenient way to wrap functions around the
- * cpp macros.  The main benefits of wrapping are that 1) repeated macro
- * expansion can cause code bloat, especially for rb_{insert,remove)(), and
- * 2) type, linkage, comparison functions, etc. need not be specified at every
- * call point.
- */
+/* Tree structure. */
+template<typename T, typename Trait>
+struct RedBlackTree
+{
+  T* rbt_root;
+  T rbt_nil;
+
+  void Init()
+  {
+    rb_new(T, Trait::GetTreeNode, this);
+  }
+
+  T* First()
+  {
+    T* ret;
+    rb_first(T, Trait::GetTreeNode, this, ret);
+    return ret;
+  }
+
+  T* Last()
+  {
+    T* ret;
+    rb_last(T, Trait::GetTreeNode, this, ret);
+    return ret;
+  }
+
+  T* Next(T* aNode)
+  {
+    T* ret;
+    rb_next(T, Trait::GetTreeNode, Trait::Compare, this, aNode, ret);
+    return ret;
+  }
 
-#define rb_wrap(a_prefix, a_tree_type, a_type, a_field, a_cmp)                 \
-  static void a_prefix##new (a_tree_type * tree)                               \
-  {                                                                            \
-    rb_new(a_type, a_field, tree);                                             \
-  }                                                                            \
-  static a_type* a_prefix##first(a_tree_type* tree)                            \
-  {                                                                            \
-    a_type* ret;                                                               \
-    rb_first(a_type, a_field, tree, ret);                                      \
-    return (ret);                                                              \
-  }                                                                            \
-  static a_type* a_prefix##last(a_tree_type* tree)                             \
-  {                                                                            \
-    a_type* ret;                                                               \
-    rb_last(a_type, a_field, tree, ret);                                       \
-    return (ret);                                                              \
-  }                                                                            \
-  static a_type* a_prefix##next(a_tree_type* tree, a_type* node)               \
-  {                                                                            \
-    a_type* ret;                                                               \
-    rb_next(a_type, a_field, a_cmp, tree, node, ret);                          \
-    return (ret);                                                              \
-  }                                                                            \
-  static a_type* a_prefix##prev(a_tree_type* tree, a_type* node)               \
-  {                                                                            \
-    a_type* ret;                                                               \
-    rb_prev(a_type, a_field, a_cmp, tree, node, ret);                          \
-    return (ret);                                                              \
-  }                                                                            \
-  static a_type* a_prefix##search(a_tree_type* tree, a_type* key)              \
-  {                                                                            \
-    a_type* ret;                                                               \
-    rb_search(a_type, a_field, a_cmp, tree, key, ret);                         \
-    return (ret);                                                              \
-  }                                                                            \
-  static a_type* a_prefix##nsearch(a_tree_type* tree, a_type* key)             \
-  {                                                                            \
-    a_type* ret;                                                               \
-    rb_nsearch(a_type, a_field, a_cmp, tree, key, ret);                        \
-    return (ret);                                                              \
-  }                                                                            \
-  static void a_prefix##insert(a_tree_type* tree, a_type* node)                \
-  {                                                                            \
-    rb_insert(a_type, a_field, a_cmp, tree, node);                             \
-  }                                                                            \
-  static void a_prefix##remove(a_tree_type* tree, a_type* node)                \
-  {                                                                            \
-    rb_remove(a_type, a_field, a_cmp, tree, node);                             \
+  T* Prev(T* aNode)
+  {
+    T* ret;
+    rb_prev(T, Trait::GetTreeNode, Trait::Compare, this, aNode, ret);
+    return ret;
+  }
+
+  T* Search(T* aKey)
+  {
+    T* ret;
+    rb_search(T, Trait::GetTreeNode, Trait::Compare, this, aKey, ret);
+    return ret;
   }
 
+  /* Find a match if it exists. Otherwise, find the next greater node, if one
+   * exists. */
+  T* SearchOrNext(T* aKey)
+  {
+    T* ret;
+    rb_nsearch(T, Trait::GetTreeNode, Trait::Compare, this, aKey, ret);
+    return ret;
+  }
+
+  void Insert(T* aNode)
+  {
+    rb_insert(T, Trait::GetTreeNode, Trait::Compare, this, aNode);
+  }
+
+  void Remove(T* aNode)
+  {
+    rb_remove(T, Trait::GetTreeNode, Trait::Compare, this, aNode);
+  }
+};
+
 /*
  * The iterators simulate recursion via an array of pointers that store the
  * current path.  This is critical to performance, since a series of calls to
  * rb_{next,prev}() would require time proportional to (n lg n), whereas this
  * implementation only requires time proportional to (n).
  *
  * Since the iterators cache a path down the tree, any tree modification may
  * cause the cached path to become invalid.  In order to continue iteration,