Bug 1365460 - Avoid unused variables. r?njn draft
author Mike Hommey <mh+mozilla@glandium.org>
Thu, 18 May 2017 12:24:57 +0900
changeset 580765 22d5bb26065d6fe488d218be881a89e1a93571a5
parent 580764 35b42ba287f246e0af397a777b1de404929b5ce9
child 580766 ac6414517a0aa7c98e017c14ddbb09287f9d8e4a
push id 59654
push user bmo:mh+mozilla@glandium.org
push date Thu, 18 May 2017 22:44:22 +0000
reviewers njn
bugs 1365460
milestone55.0a1
Bug 1365460 - Avoid unused variables. r?njn
memory/mozjemalloc/moz.build
memory/mozjemalloc/mozjemalloc.cpp
--- a/memory/mozjemalloc/moz.build
+++ b/memory/mozjemalloc/moz.build
@@ -20,14 +20,15 @@ if CONFIG['OS_ARCH'] == 'SunOS' and not 
 
 DEFINES['MOZ_JEMALLOC_IMPL'] = True
 
 LOCAL_INCLUDES += [
     '/memory/build',
 ]
 
 if CONFIG['GNU_CXX']:
-    CXXFLAGS += ['-Wno-unused'] # too many annoying warnings from mfbt/ headers
+    # too many warnings from functions generated through rb_wrap from rb.h.
+    CXXFLAGS += ['-Wno-unused-function']
 
 if CONFIG['_MSC_VER']:
     CXXFLAGS += ['-wd4273'] # inconsistent dll linkage (bug 558163)
 
 DISABLE_STL_WRAPPING = True
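
The moz.build change narrows the suppression from the blanket -Wno-unused to
-Wno-unused-function only: rb_wrap() from rb.h expands into a whole family of
static tree-manipulation functions, and any of them a translation unit never
calls trips -Wunused-function. The narrower flag keeps that noise quiet while
re-enabling -Wunused-variable, which the mozjemalloc.cpp changes below then
have to satisfy. A toy analogue of the situation (hypothetical GEN_HELPERS
macro, not the real rb.h):

    // unused-function-demo.cpp: compile with -Wall or -Wunused-function
    #define GEN_HELPERS(prefix)                                   \
        static int prefix##_first(int x) { return x + 1; }        \
        static int prefix##_last(int x)  { return x - 1; }

    GEN_HELPERS(tree)  // defines tree_first() and tree_last()

    int main()
    {
        // tree_last() is never called, so -Wunused-function warns about
        // it; -Wno-unused-function silences exactly that class of warning.
        return tree_first(0);
    }
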
--- a/memory/mozjemalloc/mozjemalloc.cpp
+++ b/memory/mozjemalloc/mozjemalloc.cpp
@@ -988,17 +988,17 @@ static arena_t		**arenas;
 static unsigned		narenas;
 static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
 
 #ifndef NO_TLS
 /*
  * Map of pthread_self() --> arenas[???], used for selecting an arena to use
  * for allocations.
  */
-#ifndef MOZ_MEMORY_WINDOWS
+#if !defined(MOZ_MEMORY_WINDOWS) && !defined(MOZ_MEMORY_DARWIN)
 static __thread arena_t	*arenas_map;
 #endif
 #endif
 
 /*******************************/
 /*
  * Runtime configuration options.
  */
@@ -2844,31 +2844,33 @@ arena_run_split(arena_t *arena, arena_ru
 
 	if (chunk->ndirty == 0 && old_ndirty > 0)
 		arena_chunk_tree_dirty_remove(&arena->chunks_dirty, chunk);
 }
 
 static void
 arena_chunk_init(arena_t *arena, arena_chunk_t *chunk)
 {
-	arena_run_t *run;
 	size_t i;
 
 	arena->stats.mapped += chunksize;
 
 	chunk->arena = arena;
 
 	/*
 	 * Claim that no pages are in use, since the header is merely overhead.
 	 */
 	chunk->ndirty = 0;
 
 	/* Initialize the map to contain one maximal free untouched run. */
-	run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
-	    pagesize_2pow));
+#ifdef MALLOC_DECOMMIT
+	arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
+	                   (arena_chunk_header_npages << pagesize_2pow));
+#endif
+
 	for (i = 0; i < arena_chunk_header_npages; i++)
 		chunk->map[i].bits = 0;
 	chunk->map[i].bits = arena_maxclass | CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED;
 	for (i++; i < chunk_npages-1; i++) {
 		chunk->map[i].bits = CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED;
 	}
 	chunk->map[chunk_npages-1].bits = arena_maxclass | CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED;
 
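
In arena_chunk_init(), the run pointer is only consumed by
MALLOC_DECOMMIT-specific code later in the function (below this hunk), so in
builds without MALLOC_DECOMMIT the unconditional declaration is precisely the
kind of dead local that the newly re-enabled -Wunused-variable reports.
Reduced to a minimal sketch (hypothetical function, not the real code), the
pre-patch shape without MALLOC_DECOMMIT was:

    // unused-variable-demo.cpp: compile with -Wall or -Wunused-variable
    void chunk_init_sketch(char *chunk)
    {
        char *run = chunk + 4096;  // initialized but never read:
                                   // -Wunused-variable fires here
        // ... map initialization that never touches 'run' ...
    }

Hoisting the declaration under #ifdef MALLOC_DECOMMIT scopes it to the only
configuration that actually uses it.
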
@@ -4084,17 +4086,17 @@ iralloc(void *ptr, size_t size)
 		return (huge_ralloc(ptr, size, oldsize));
 }
 
 static bool
 arena_new(arena_t *arena)
 {
 	unsigned i;
 	arena_bin_t *bin;
-	size_t pow2_size, prev_run_size;
+	size_t prev_run_size;
 
 	if (malloc_spin_init(&arena->lock))
 		return (true);
 
 	memset(&arena->stats, 0, sizeof(arena_stats_t));
 
 	/* Initialize chunks. */
 	arena_chunk_tree_dirty_new(&arena->chunks_dirty);
@@ -4126,17 +4128,16 @@ arena_new(arena_t *arena)
 	/* Quantum-spaced bins. */
 	for (; i < ntbins + nqbins; i++) {
 		bin = &arena->bins[i];
 		bin->runcur = NULL;
 		arena_run_tree_new(&bin->runs);
 
 		bin->reg_size = quantum * (i - ntbins + 1);
 
-		pow2_size = pow2_ceil(quantum * (i - ntbins + 1));
 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
 	}
 
 	/* (2^n)-spaced sub-page bins. */
 	for (; i < ntbins + nqbins + nsbins; i++) {
 		bin = &arena->bins[i];
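
In arena_new(), pow2_size was assigned from pow2_ceil() but never read
afterwards, so the whole statement is dead. Dropping the call outright is
safe because pow2_ceil() is a pure bit-twiddling helper with no side effects;
it is, approximately (a sketch of the classic round-up idiom, not copied from
the mozjemalloc source), the following, where SIZEOF_PTR is the allocator's
pointer-size macro:

    // Round x up to the next power of two: smear the highest set bit
    // rightward, then add one.
    static inline size_t pow2_ceil(size_t x)
    {
        x--;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
    #if (SIZEOF_PTR == 8)
        x |= x >> 32;  // 64-bit builds need one more smear step
    #endif
        x++;
        return x;
    }
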
@@ -4569,19 +4570,16 @@ extern "C" void register_zone(void);
 static
 #endif
 bool
 malloc_init_hard(void)
 {
 	unsigned i;
 	const char *opts;
 	long result;
-#ifndef MOZ_MEMORY_WINDOWS
-	int linklen;
-#endif
 
 #ifndef MOZ_MEMORY_WINDOWS
 	malloc_mutex_lock(&init_lock);
 #endif
 
 	if (malloc_initialized) {
 		/*
 		 * Another thread initialized the allocator before this one
@@ -5351,17 +5349,16 @@ hard_purge_chunk(arena_chunk_t *chunk)
 
 /* Explicitly remove all of this arena's MADV_FREE'd pages from memory. */
 static void
 hard_purge_arena(arena_t *arena)
 {
 	malloc_spin_lock(&arena->lock);
 
 	while (!LinkedList_IsEmpty(&arena->chunks_madvised)) {
-		LinkedList* next = arena->chunks_madvised.next;
 		arena_chunk_t *chunk =
 			LinkedList_Get(arena->chunks_madvised.next,
 				       arena_chunk_t, chunks_madvised_elem);
 		hard_purge_chunk(chunk);
 		LinkedList_Remove(&chunk->chunks_madvised_elem);
 	}
 
 	malloc_spin_unlock(&arena->lock);
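
In hard_purge_arena(), the removed next temporary was never read:
LinkedList_Get() is handed the arena->chunks_madvised.next expression
directly. LinkedList_Get is a container_of-style accessor, roughly (a hedged
reconstruction, not the exact mozjemalloc definition; requires <stddef.h> for
offsetof):

    // Recover the enclosing object from a pointer to its embedded list link.
    #define LinkedList_Get(e, type, prop) \
        ((type*)((char*)(e) - offsetof(type, prop)))

so the loop only needs the list-element pointer at the call site, and the
standalone next variable was dead from the start.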