Bug 1396681 - Remove some typedefs in mozjemalloc. r?njn draft
author Mike Hommey <mh+mozilla@glandium.org>
Sat, 02 Sep 2017 08:15:39 +0900
changeset 658836 76f58be160c5154514700fa7c60a1dc0ac56e1b9
parent 658813 1401e3eec44df87963d3af329ef8a4183ab0483f
child 658956 111eb83abaa5f4299a4b0ed9f08c462218bec485
push id 77884
push user bmo:mh+mozilla@glandium.org
push date Mon, 04 Sep 2017 23:26:34 +0000
reviewers njn
bugs 1396681
milestone 57.0a1
Bug 1396681 - Remove some typedefs in mozjemalloc. r?njn

As a former C codebase, mozjemalloc used typedefs to avoid having to spell out "struct foo" everywhere. C++ doesn't need typedefs to achieve that, so we can remove them. We do, however, keep a few typedefs in headers that are still included from other C source files.
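For context, a minimal sketch of the pattern this patch removes. The names Foo and Bar below are hypothetical, not taken from the patch. In C, a typedef is needed before the struct name can be used without the "struct" keyword (including inside the struct itself for self-referential pointers); in C++, the struct name is a type name on its own:

	/* Before (C): tag name plus typedef, so "Foo" works as a type. */
	typedef struct Foo_s Foo;
	struct Foo_s {
		Foo *next;
	};

	/* After (C++): the struct name is already a type, so the
	 * typedef and the "_s" tag both go away. */
	struct Foo {
		Foo *next;
	};

	/* Where C needed "typedef struct Bar_s Bar;" ahead of the
	 * definition, C++ only needs a bare forward declaration. */
	struct Bar;

This is exactly the shape of the changes below, including the forward declarations of arena_t and arena_bin_t in mozjemalloc.cpp.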
memory/mozjemalloc/linkedlist.h
memory/mozjemalloc/mozjemalloc.cpp
--- a/memory/mozjemalloc/linkedlist.h
+++ b/memory/mozjemalloc/linkedlist.h
@@ -30,19 +30,17 @@
  *
  *******************************************************************************/
 
 #ifndef linkedlist_h__
 #define linkedlist_h__
 
 #include <stddef.h>
 
-typedef struct LinkedList_s LinkedList;
-
-struct LinkedList_s {
+struct LinkedList {
 	LinkedList *next;
 	LinkedList *prev;
 };
 
 /* Convert from LinkedList* to foo*. */
 #define LinkedList_Get(e, type, prop) \
   (type*)((char*)(e) - offsetof(type, prop))
 
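The LinkedList_Get macro kept above is the container_of idiom: given a pointer to an embedded LinkedList member, it subtracts the member's offset within the enclosing type to recover a pointer to the enclosing object. A usage sketch, assuming linkedlist.h is included; the Foo type is hypothetical and not part of the patch:

	struct Foo {
		int value;
		LinkedList entry;	/* embedded list linkage */
	};

	/* Recover the enclosing Foo* from a pointer to its "entry"
	 * member, e.g. one obtained while walking a list. */
	void example(LinkedList *e)
	{
		Foo *owner = LinkedList_Get(e, Foo, entry);
		owner->value++;
	}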
--- a/memory/mozjemalloc/mozjemalloc.cpp
+++ b/memory/mozjemalloc/mozjemalloc.cpp
@@ -415,22 +415,22 @@ static pthread_key_t tlsIndex;
  * Mutexes based on spinlocks.  We can't use normal pthread spinlocks in all
  * places, because they require malloc()ed memory, which causes bootstrapping
  * issues in some cases.
  */
 #if defined(XP_WIN)
 #define malloc_mutex_t CRITICAL_SECTION
 #define malloc_spinlock_t CRITICAL_SECTION
 #elif defined(XP_DARWIN)
-typedef struct {
+struct malloc_mutex_t {
 	OSSpinLock	lock;
-} malloc_mutex_t;
-typedef struct {
+};
+struct malloc_spinlock_t {
 	OSSpinLock	lock;
-} malloc_spinlock_t;
+};
 #else
 typedef pthread_mutex_t malloc_mutex_t;
 typedef pthread_mutex_t malloc_spinlock_t;
 #endif
 
 /* Set to true once the allocator has been initialized. */
 static bool malloc_initialized = false;
 
@@ -444,24 +444,22 @@ static malloc_mutex_t init_lock = PTHREA
 static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
 #endif
 
 /******************************************************************************/
 /*
  * Statistics data structures.
  */
 
-typedef struct malloc_bin_stats_s malloc_bin_stats_t;
-struct malloc_bin_stats_s {
+struct malloc_bin_stats_t {
 	/* Current number of runs in this bin. */
 	unsigned long	curruns;
 };
 
-typedef struct arena_stats_s arena_stats_t;
-struct arena_stats_s {
+struct arena_stats_t {
 	/* Number of bytes currently mapped. */
 	size_t		mapped;
 
 	/* Current number of committed pages. */
 	size_t		committed;
 
 	/* Per-size-category statistics. */
 	size_t		allocated_small;
@@ -478,18 +476,17 @@ enum ChunkType {
   UNKNOWN_CHUNK,
   ZEROED_CHUNK,   // chunk only contains zeroes
   ARENA_CHUNK,    // used to back arena runs created by arena_run_alloc
   HUGE_CHUNK,     // used to back huge allocations (e.g. huge_malloc)
   RECYCLED_CHUNK, // chunk has been stored for future use by chunk_recycle
 };
 
 /* Tree of extents. */
-typedef struct extent_node_s extent_node_t;
-struct extent_node_s {
+struct extent_node_t {
 	/* Linkage for the size/address-ordered tree. */
 	rb_node(extent_node_t) link_szad;
 
 	/* Linkage for the address-ordered tree. */
 	rb_node(extent_node_t) link_ad;
 
 	/* Pointer to the extent that this tree node is responsible for. */
 	void	*addr;
@@ -512,35 +509,33 @@ typedef rb_tree(extent_node_t) extent_tr
  * depth.
  */
 #if (SIZEOF_PTR == 4)
 #  define MALLOC_RTREE_NODESIZE (1U << 14)
 #else
 #  define MALLOC_RTREE_NODESIZE CACHELINE
 #endif
 
-typedef struct malloc_rtree_s malloc_rtree_t;
-struct malloc_rtree_s {
+struct malloc_rtree_t {
 	malloc_spinlock_t	lock;
 	void			**root;
 	unsigned		height;
 	unsigned		level2bits[1]; /* Dynamically sized. */
 };
 
 /******************************************************************************/
 /*
  * Arena data structures.
  */
 
-typedef struct arena_s arena_t;
-typedef struct arena_bin_s arena_bin_t;
+struct arena_t;
+struct arena_bin_t;
 
 /* Each element of the chunk map corresponds to one page within the chunk. */
-typedef struct arena_chunk_map_s arena_chunk_map_t;
-struct arena_chunk_map_s {
+struct arena_chunk_map_t {
 	/*
 	 * Linkage for run trees.  There are two disjoint uses:
 	 *
 	 * 1) arena_t's runs_avail tree.
 	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
 	 *    runs, rather than directly embedding linkage.
 	 */
 	rb_node(arena_chunk_map_t)	link;
@@ -613,18 +608,17 @@ struct arena_chunk_map_s {
 #define	CHUNK_MAP_ZEROED	((size_t)0x04U)
 #define	CHUNK_MAP_LARGE		((size_t)0x02U)
 #define	CHUNK_MAP_ALLOCATED	((size_t)0x01U)
 };
 typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
 typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
 
 /* Arena chunk header. */
-typedef struct arena_chunk_s arena_chunk_t;
-struct arena_chunk_s {
+struct arena_chunk_t {
 	/* Arena that owns the chunk. */
 	arena_t		*arena;
 
 	/* Linkage for the arena's chunks_dirty tree. */
 	rb_node(arena_chunk_t) link_dirty;
 
 #ifdef MALLOC_DOUBLE_PURGE
 	/* If we're double-purging, we maintain a linked list of chunks which
@@ -639,18 +633,17 @@ struct arena_chunk_s {
 	/* Number of dirty pages. */
 	size_t		ndirty;
 
 	/* Map of pages within chunk that keeps track of free/large/small. */
 	arena_chunk_map_t map[1]; /* Dynamically sized. */
 };
 typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
 
-typedef struct arena_run_s arena_run_t;
-struct arena_run_s {
+struct arena_run_t {
 #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
 	uint32_t	magic;
 #  define ARENA_RUN_MAGIC 0x384adf93
 #endif
 
 	/* Bin this run is associated with. */
 	arena_bin_t	*bin;
 
@@ -659,17 +652,17 @@ struct arena_run_s {
 
 	/* Number of free regions in run. */
 	unsigned	nfree;
 
 	/* Bitmask of in-use regions (0: in use, 1: free). */
 	unsigned	regs_mask[1]; /* Dynamically sized. */
 };
 
-struct arena_bin_s {
+struct arena_bin_t {
 	/*
 	 * Current run being used to service allocations of this bin's size
 	 * class.
 	 */
 	arena_run_t	*runcur;
 
 	/*
 	 * Tree of non-full runs.  This tree is used when looking for an
@@ -694,17 +687,17 @@ struct arena_bin_s {
 
 	/* Offset of first region in a run for this bin's size class. */
 	uint32_t	reg0_offset;
 
 	/* Bin statistics. */
 	malloc_bin_stats_t stats;
 };
 
-struct arena_s {
+struct arena_t {
 #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
 	uint32_t		magic;
 #  define ARENA_MAGIC 0x947d3d24
 #endif
 
 	/* All operations on this arena require that lock be locked. */
 	malloc_spinlock_t	lock;