Bug 1403444 - Replace the rb_foreach_* macros with a range iterator. r?njn draft
author Mike Hommey <mh+mozilla@glandium.org>
Wed, 27 Sep 2017 15:25:19 +0900
changeset 670938 45e6d757ef72f59b9ee9294183b4cc8ad445e48f
parent 670936 c21e81f77ae1b167d9db5435bbd75ba30572c81e
child 670940 f60d47bb0826f9295abbd91740bf402376f1e1b9
push id 81768
push user bmo:mh+mozilla@glandium.org
push date Wed, 27 Sep 2017 06:58:47 +0000
reviewers njn
bugs 1403444
milestone 58.0a1
Bug 1403444 - Replace the rb_foreach_* macros with a range iterator. r?njn
memory/build/mozjemalloc.cpp
memory/build/rb.h
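
For context, call sites switch from the rb_foreach_begin/rb_foreach_end macro pair to a range-based for loop over the tree's iter() range. For example, the debug check in arena_t::Purge() (see the mozjemalloc.cpp hunk below) goes from

  rb_foreach_begin(arena_chunk_t, ArenaChunkLinkDirty::GetTreeNode, &mChunksDirty, chunk) {
    ndirty += chunk->ndirty;
  } rb_foreach_end(arena_chunk_t, ArenaChunkLinkDirty::GetTreeNode, &mChunksDirty, chunk)

to

  for (auto chunk : mChunksDirty.iter()) {
    ndirty += chunk->ndirty;
  }
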
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -296,18 +296,16 @@ void *_mmap(void *addr, size_t length, i
 #endif
 #endif
 }
 #define mmap _mmap
 #define munmap(a, l) syscall(SYS_munmap, a, l)
 #endif
 #endif
 
-#include "rb.h"
-
 #ifdef MOZ_DEBUG
    /* Disable inlining to make debugging easier. */
 #ifdef inline
 #undef inline
 #endif
 
 #  define inline
 #endif
@@ -320,16 +318,18 @@ void *_mmap(void *addr, size_t length, i
 #if defined(_WIN64) || defined(__LP64__)
 #  define SIZEOF_PTR_2POW       3
 #else
 #  define SIZEOF_PTR_2POW       2
 #endif
 
 #define	SIZEOF_PTR		(1U << SIZEOF_PTR_2POW)
 
+#include "rb.h"
+
 /* sizeof(int) == (1U << SIZEOF_INT_2POW). */
 #ifndef SIZEOF_INT_2POW
 #  define SIZEOF_INT_2POW	2
 #endif
 
 /*
  * Size and alignment of memory chunks that are allocated by the OS's virtual
  * memory system.
@@ -2819,19 +2819,19 @@ void
 arena_t::Purge(bool aAll)
 {
   arena_chunk_t* chunk;
   size_t i, npages;
   /* If all is set purge all dirty pages. */
   size_t dirty_max = aAll ? 1 : mMaxDirty;
 #ifdef MOZ_DEBUG
   size_t ndirty = 0;
-  rb_foreach_begin(arena_chunk_t, ArenaChunkLinkDirty::GetTreeNode, &mChunksDirty, chunk) {
+  for (auto chunk : mChunksDirty.iter()) {
     ndirty += chunk->ndirty;
-  } rb_foreach_end(arena_chunk_t, ArenaChunkLinkDirty::GetTreeNode, &mChunksDirty, chunk)
+  }
   MOZ_ASSERT(ndirty == mNumDirty);
 #endif
   MOZ_DIAGNOSTIC_ASSERT(aAll || (mNumDirty > mMaxDirty));
 
   /*
    * Iterate downward through chunks until enough dirty memory has been
    * purged.  Terminate as soon as possible in order to minimize the
    * number of system calls, even if a chunk has only been partially
@@ -4983,17 +4983,16 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
 
   malloc_spin_lock(&arenas_lock);
   /* Iterate over arenas. */
   for (i = 0; i < narenas; i++) {
     arena_t* arena = arenas[i];
     size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
            arena_unused, arena_headers;
     arena_run_t* run;
-    arena_chunk_map_t* mapelm;
 
     if (!arena) {
       continue;
     }
 
     arena_headers = 0;
     arena_unused = 0;
 
@@ -5008,20 +5007,20 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
                       arena->mStats.allocated_large;
 
     arena_dirty = arena->mNumDirty << pagesize_2pow;
 
     for (j = 0; j < ntbins + nqbins + nsbins; j++) {
       arena_bin_t* bin = &arena->mBins[j];
       size_t bin_unused = 0;
 
-      rb_foreach_begin(arena_chunk_map_t, ArenaChunkMapLink::GetTreeNode, &bin->runs, mapelm) {
+      for (auto mapelm : bin->runs.iter()) {
         run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
         bin_unused += run->nfree * bin->reg_size;
-      } rb_foreach_end(arena_chunk_map_t, ArenaChunkMapLink::GetTreeNode, &bin->runs, mapelm)
+      }
 
       if (bin->runcur) {
         bin_unused += bin->runcur->nfree * bin->reg_size;
       }
 
       arena_unused += bin_unused;
       arena_headers += bin->stats.curruns * bin->reg0_offset;
     }
--- a/memory/build/rb.h
+++ b/memory/build/rb.h
@@ -611,104 +611,122 @@ private:
         rbp_mrr_t = RotateLeft(aNode);
         Trait::GetTreeNode(node).SetRight(rbp_mrr_t);
       } else {
         node = RotateLeft(aNode);
       }
     }
     return node;
   }
+
+  /*
+   * The iterator simulates recursion via an array of pointers that store the
+   * current path.  This is critical to performance, since a series of calls to
+   * rb_{next,prev}() would require time proportional to (n lg n), whereas this
+   * implementation only requires time proportional to (n).
+   *
+   * Since the iterator caches a path down the tree, any tree modification may
+   * cause the cached path to become invalid. Don't modify the tree during an
+   * iteration.
+   */
+
+  /*
+   * Avoid using variable-length arrays, at the cost of using more stack space.
+   * Size the path arrays such that they are always large enough, even if a
+   * tree consumes all of memory.  Since each node must contain a minimum of
+   * two pointers, there can never be more nodes than:
+   *
+   *   1 << ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1))
+   *
+   * Since the depth of a tree is limited to 3*lg(#nodes), the maximum depth
+   * is:
+   *
+   *   (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
+   *
+   * This works out to a maximum depth of 87 and 180 for 32- and 64-bit
+   * systems, respectively (approximately 348 and 1440 bytes, respectively).
+   */
+public:
+  class Iterator
+  {
+    T* mSentinel;
+    T* mPath[3 * ((SIZEOF_PTR << 3) - (SIZEOF_PTR_2POW + 1))];
+    unsigned mDepth;
+
+  public:
+    explicit Iterator(RedBlackTree<T, Trait>* aTree)
+      : mSentinel(&aTree->rbt_nil)
+      , mDepth(0)
+    {
+      /* Initialize the path to contain the left spine. */
+      if (aTree->rbt_root != mSentinel) {
+        T* node;
+        mPath[mDepth++] = aTree->rbt_root;
+        while ((node = Trait::GetTreeNode(mPath[mDepth - 1]).Left()) !=
+               mSentinel) {
+          mPath[mDepth++] = node;
+        }
+      }
+    }
+
+    class Item
+    {
+      Iterator* mIterator;
+      T* mItem;
+
+    public:
+      Item(Iterator* aIterator, T* aItem)
+        : mIterator(aIterator)
+        , mItem(aItem)
+      { }
+
+      bool operator!=(const Item& aOther) const
+      {
+        return (mIterator != aOther.mIterator) || (mItem != aOther.mItem);
+      }
+
+      T* operator*() const { return mItem; }
+
+      const Item& operator++()
+      {
+        mItem = mIterator->Next();
+        return *this;
+      }
+    };
+
+    Item begin()
+    {
+      return Item(this, mDepth > 0 ? mPath[mDepth - 1] : nullptr);
+    }
+
+    Item end()
+    {
+      return Item(this, nullptr);
+    }
+
+    T* Next()
+    {
+      T* node;
+      if ((node = Trait::GetTreeNode(mPath[mDepth - 1]).Right()) !=
+          mSentinel) {
+        /* The successor is the left-most node in the right subtree. */
+        mPath[mDepth++] = node;
+        while ((node = Trait::GetTreeNode(mPath[mDepth - 1]).Left()) !=
+               mSentinel) {
+          mPath[mDepth++] = node;
+        }
+      } else {
+        /* The successor is above the current node.  Unwind until a
+         * left-leaning edge is removed from the path, or the path is empty. */
+        for (mDepth--; mDepth > 0; mDepth--) {
+          if (Trait::GetTreeNode(mPath[mDepth - 1]).Left() == mPath[mDepth]) {
+            break;
+          }
+        }
+      }
+      return mDepth > 0 ? mPath[mDepth - 1] : nullptr;
+    }
+  };
+
+  Iterator iter() { return Iterator(this); }
 };
 
-/*
- * The iterators simulate recursion via an array of pointers that store the
- * current path.  This is critical to performance, since a series of calls to
- * rb_{next,prev}() would require time proportional to (n lg n), whereas this
- * implementation only requires time proportional to (n).
- *
- * Since the iterators cache a path down the tree, any tree modification may
- * cause the cached path to become invalid.  In order to continue iteration,
- * use something like the following sequence:
- *
- *   {
- *       a_type *node, *tnode;
- *
- *       rb_foreach_begin(a_type, a_field, a_tree, node) {
- *           ...
- *           rb_next(a_type, a_field, a_cmp, a_tree, node, tnode);
- *           rb_remove(a_type, a_field, a_cmp, a_tree, node);
- *           ...
- *       } rb_foreach_end(a_type, a_field, a_tree, node)
- *   }
- *
- * Note that this idiom is not advised if every iteration modifies the tree,
- * since in that case there is no algorithmic complexity improvement over a
- * series of rb_{next,prev}() calls, thus making the setup overhead wasted
- * effort.
- */
-
-/*
- * Avoid using variable-length arrays, at the cost of using more stack space.
- * Size the path arrays such that they are always large enough, even if a
- * tree consumes all of memory.  Since each node must contain a minimum of
- * two pointers, there can never be more nodes than:
- *
- *   1 << ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1))
- *
- * Since the depth of a tree is limited to 3*lg(#nodes), the maximum depth
- * is:
- *
- *   (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
- *
- * This works out to a maximum depth of 87 and 180 for 32- and 64-bit
- * systems, respectively (approximatly 348 and 1440 bytes, respectively).
- */
-#define rbp_f_height (3 * ((SIZEOF_PTR << 3) - (SIZEOF_PTR_2POW + 1)))
-
-#define rb_foreach_begin(a_type, a_field, a_tree, a_var)                       \
-  {                                                                            \
-    {                                                                          \
-      /* Initialize the path to contain the left spine.             */         \
-      a_type* rbp_f_path[rbp_f_height];                                        \
-      a_type* rbp_f_node;                                                      \
-      unsigned rbp_f_depth = 0;                                                \
-      if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) {                          \
-        rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root;                          \
-        rbp_f_depth++;                                                         \
-        while ((rbp_f_node = a_field(rbp_f_path[rbp_f_depth - 1]).Left()) !=   \
-               &(a_tree)->rbt_nil) {                                           \
-          rbp_f_path[rbp_f_depth] = rbp_f_node;                                \
-          rbp_f_depth++;                                                       \
-        }                                                                      \
-      }                                                                        \
-      /* While the path is non-empty, iterate.                      */         \
-      while (rbp_f_depth > 0) {                                                \
-        (a_var) = rbp_f_path[rbp_f_depth - 1];
-
-#define rb_foreach_end(a_type, a_field, a_tree, a_var)                         \
-  /* Find the successor.                                    */                 \
-  if ((rbp_f_node = a_field(rbp_f_path[rbp_f_depth - 1]).Right()) !=           \
-      &(a_tree)->rbt_nil) {                                                    \
-    /* The successor is the left-most node in the right   */                   \
-    /* subtree.                                           */                   \
-    rbp_f_path[rbp_f_depth] = rbp_f_node;                                      \
-    rbp_f_depth++;                                                             \
-    while ((rbp_f_node = a_field(rbp_f_path[rbp_f_depth - 1]).Left()) !=       \
-           &(a_tree)->rbt_nil) {                                               \
-      rbp_f_path[rbp_f_depth] = rbp_f_node;                                    \
-      rbp_f_depth++;                                                           \
-    }                                                                          \
-  } else {                                                                     \
-    /* The successor is above the current node.  Unwind   */                   \
-    /* until a left-leaning edge is removed from the      */                   \
-    /* path, or the path is empty.                        */                   \
-    for (rbp_f_depth--; rbp_f_depth > 0; rbp_f_depth--) {                      \
-      if (a_field(rbp_f_path[rbp_f_depth - 1]).Left() ==                       \
-          rbp_f_path[rbp_f_depth]) {                                           \
-        break;                                                                 \
-      }                                                                        \
-    }                                                                          \
-  }                                                                            \
-  }                                                                            \
-  }                                                                            \
-  }
-
 #endif /* RB_H_ */