Bug 1403444 - Make the "static" part of what the rb_wrap macro expands to. r?njn draft
author Mike Hommey <mh+mozilla@glandium.org>
Sun, 03 Sep 2017 06:49:39 +0900
changeset 671605 911f67eedc4a7f00d2470251032dba583dd3462e
parent 671604 fe4ceda8b760d39d7360ed2c1e0512e17f9a1688
child 671606 2891eb2871d968b544cd45298bd60331d8185948
push id 81993
push user bmo:mh+mozilla@glandium.org
push date Thu, 28 Sep 2017 04:40:59 +0000
reviewers njn
bugs 1403444
milestone 58.0a1
Bug 1403444 - Make the "static" part of what the rb_wrap macro expands to. r?njn All uses of rb_wrap pass "static" as the first argument, so move it into the macro itself.
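For context, a minimal sketch of what this change means at a call site, using the extent_tree_szad_ instance from the diff below; the generated signatures follow directly from the rb_wrap definition in rb.h:

    /* Before: callers spell out the storage-class attribute themselves. */
    rb_wrap(static, extent_tree_szad_, extent_tree_t, extent_node_t,
        link_szad, extent_szad_comp)

    /* After: "static" is baked into the macro, so the argument goes away. */
    rb_wrap(extent_tree_szad_, extent_tree_t, extent_node_t,
        link_szad, extent_szad_comp)

    /* Either form expands to the same set of static wrapper functions, e.g.: */
    static void extent_tree_szad_new(extent_tree_t *tree);
    static extent_node_t *extent_tree_szad_first(extent_tree_t *tree);
    static extent_node_t *extent_tree_szad_search(extent_tree_t *tree,
                                                  extent_node_t *key);
    static void extent_tree_szad_insert(extent_tree_t *tree, extent_node_t *node);
    static void extent_tree_szad_remove(extent_tree_t *tree, extent_node_t *node);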
memory/build/mozjemalloc.cpp
memory/build/rb.h
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -1513,30 +1513,30 @@ extent_szad_comp(extent_node_t *a, exten
 
 		ret = (a_addr > b_addr) - (a_addr < b_addr);
 	}
 
 	return (ret);
 }
 
 /* Wrap red-black tree macros in functions. */
-rb_wrap(static, extent_tree_szad_, extent_tree_t, extent_node_t,
+rb_wrap(extent_tree_szad_, extent_tree_t, extent_node_t,
     link_szad, extent_szad_comp)
 
 static inline int
 extent_ad_comp(extent_node_t *a, extent_node_t *b)
 {
 	uintptr_t a_addr = (uintptr_t)a->addr;
 	uintptr_t b_addr = (uintptr_t)b->addr;
 
 	return ((a_addr > b_addr) - (a_addr < b_addr));
 }
 
 /* Wrap red-black tree macros in functions. */
-rb_wrap(static, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
+rb_wrap(extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
     extent_ad_comp)
 
 static inline int
 extent_bounds_comp(extent_node_t* aKey, extent_node_t* aNode)
 {
   uintptr_t key_addr = (uintptr_t)aKey->addr;
   uintptr_t node_addr = (uintptr_t)aNode->addr;
   size_t node_size = aNode->size;
@@ -2373,48 +2373,48 @@ arena_comp(arena_t* a, arena_t* b)
 {
   MOZ_ASSERT(a);
   MOZ_ASSERT(b);
 
   return (a->mId > b->mId) - (a->mId < b->mId);
 }
 
 /* Wrap red-black tree macros in functions. */
-rb_wrap(static, arena_tree_, arena_tree_t, arena_t, mLink, arena_comp)
+rb_wrap(arena_tree_, arena_tree_t, arena_t, mLink, arena_comp)
 
 static inline int
 arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
 {
 	uintptr_t a_chunk = (uintptr_t)a;
 	uintptr_t b_chunk = (uintptr_t)b;
 
 	MOZ_ASSERT(a);
 	MOZ_ASSERT(b);
 
 	return ((a_chunk > b_chunk) - (a_chunk < b_chunk));
 }
 
 /* Wrap red-black tree macros in functions. */
-rb_wrap(static, arena_chunk_tree_dirty_, arena_chunk_tree_t,
+rb_wrap(arena_chunk_tree_dirty_, arena_chunk_tree_t,
     arena_chunk_t, link_dirty, arena_chunk_comp)
 
 static inline int
 arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
 {
 	uintptr_t a_mapelm = (uintptr_t)a;
 	uintptr_t b_mapelm = (uintptr_t)b;
 
 	MOZ_ASSERT(a);
 	MOZ_ASSERT(b);
 
 	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
 }
 
 /* Wrap red-black tree macros in functions. */
-rb_wrap(static, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, link,
+rb_wrap(arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, link,
     arena_run_comp)
 
 static inline int
 arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
 {
 	int ret;
 	size_t a_size = a->bits & ~pagesize_mask;
 	size_t b_size = b->bits & ~pagesize_mask;
@@ -2436,17 +2436,17 @@ arena_avail_comp(arena_chunk_map_t *a, a
 
 		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
 	}
 
 	return (ret);
 }
 
 /* Wrap red-black tree macros in functions. */
-rb_wrap(static, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, link,
+rb_wrap(arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, link,
     arena_avail_comp)
 
 static inline void *
 arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
 {
 	void *ret;
 	unsigned i, mask, bit, regind;
 
--- a/memory/build/rb.h
+++ b/memory/build/rb.h
@@ -701,62 +701,62 @@ struct RedBlackTree
 /*
  * The rb_wrap() macro provides a convenient way to wrap functions around the
  * cpp macros.  The main benefits of wrapping are that 1) repeated macro
  * expansion can cause code bloat, especially for rb_{insert,remove)(), and
  * 2) type, linkage, comparison functions, etc. need not be specified at every
  * call point.
  */
 
-#define	rb_wrap(a_attr, a_prefix, a_tree_type, a_type, a_field, a_cmp)	\
-a_attr void								\
+#define	rb_wrap(a_prefix, a_tree_type, a_type, a_field, a_cmp)		\
+static void								\
 a_prefix##new(a_tree_type *tree) {					\
     rb_new(a_type, a_field, tree);					\
 }									\
-a_attr a_type *								\
+static a_type *								\
 a_prefix##first(a_tree_type *tree) {					\
     a_type *ret;							\
     rb_first(a_type, a_field, tree, ret);				\
     return (ret);							\
 }									\
-a_attr a_type *								\
+static a_type *								\
 a_prefix##last(a_tree_type *tree) {					\
     a_type *ret;							\
     rb_last(a_type, a_field, tree, ret);				\
     return (ret);							\
 }									\
-a_attr a_type *								\
+static a_type *								\
 a_prefix##next(a_tree_type *tree, a_type *node) {			\
     a_type *ret;							\
     rb_next(a_type, a_field, a_cmp, tree, node, ret);			\
     return (ret);							\
 }									\
-a_attr a_type *								\
+static a_type *								\
 a_prefix##prev(a_tree_type *tree, a_type *node) {			\
     a_type *ret;							\
     rb_prev(a_type, a_field, a_cmp, tree, node, ret);			\
     return (ret);							\
 }									\
-a_attr a_type *								\
+static a_type *								\
 a_prefix##search(a_tree_type *tree, a_type *key) {			\
     a_type *ret;							\
     rb_search(a_type, a_field, a_cmp, tree, key, ret);			\
     return (ret);							\
 }									\
-a_attr a_type *								\
+static a_type *								\
 a_prefix##nsearch(a_tree_type *tree, a_type *key) {			\
     a_type *ret;							\
     rb_nsearch(a_type, a_field, a_cmp, tree, key, ret);			\
     return (ret);							\
 }									\
-a_attr void								\
+static void								\
 a_prefix##insert(a_tree_type *tree, a_type *node) {			\
     rb_insert(a_type, a_field, a_cmp, tree, node);			\
 }									\
-a_attr void								\
+static void								\
 a_prefix##remove(a_tree_type *tree, a_type *node) {			\
     rb_remove(a_type, a_field, a_cmp, tree, node);			\
 }
 
 /*
  * The iterators simulate recursion via an array of pointers that store the
  * current path.  This is critical to performance, since a series of calls to
  * rb_{next,prev}() would require time proportional to (n lg n), whereas this