Bug 1365460 - Remove JEMALLOC_RECYCLE/config_recycle, they are always true. r?njn draft
author      Mike Hommey <mh+mozilla@glandium.org>
date        Thu, 18 May 2017 10:32:30 +0900
changeset   580755 d38a5cc8c044b6c9c80a38ec3de0b7a51732ab16
parent      580754 17fe701cb4a730ac4bcef7c0a2132f97c029e214
child       580756 bd5309c8f24f00b46262b76c82f616d0895214ce
push id     59654
push user   bmo:mh+mozilla@glandium.org
push date   Thu, 18 May 2017 22:44:22 +0000
reviewers   njn
bugs        1365460
milestone   55.0a1
Bug 1365460 - Remove JEMALLOC_RECYCLE/config_recycle, they are always true. r?njn
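
JEMALLOC_RECYCLE is unconditionally defined, so config_recycle is a compile-time
constant `true` and every guard that mentions it can be folded away. The names
below mirror the constants in mozjemalloc.cpp, but the standalone snippet itself
is only an illustrative sketch, not code from the tree:

    // Illustrative sketch only (not part of this patch): with config_recycle
    // fixed at compile-time `true`, `config_munmap && config_recycle` reduces
    // to `config_munmap`, and `config_recycle && CAN_RECYCLE(size)` reduces
    // to `CAN_RECYCLE(size)`.
    #include <cstdio>

    static const bool config_munmap = true;   // JEMALLOC_MUNMAP is always defined
    static const bool config_recycle = true;  // JEMALLOC_RECYCLE was always defined

    int main()
    {
      bool before = config_munmap && config_recycle;  // old guard
      bool after = config_munmap;                     // simplified guard
      std::printf("%d %d\n", before, after);          // identical in every build
      return 0;
    }
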
memory/mozjemalloc/mozjemalloc.cpp
--- a/memory/mozjemalloc/mozjemalloc.cpp
+++ b/memory/mozjemalloc/mozjemalloc.cpp
@@ -207,22 +207,16 @@ typedef long ssize_t;
 #endif
 
 /*
  * Allow unmapping pages on all platforms. Note that if this is disabled,
  * jemalloc will never unmap anything, instead recycling pages for later use.
  */
 #define JEMALLOC_MUNMAP
 
-/*
- * Enable limited chunk recycling on all platforms. Note that when
- * JEMALLOC_MUNMAP is not defined, all chunks will be recycled unconditionally.
- */
-#define JEMALLOC_RECYCLE
-
 #ifndef MOZ_MEMORY_WINDOWS
 #ifndef MOZ_MEMORY_SOLARIS
 #include <sys/cdefs.h>
 #endif
 #ifndef __DECONST
 #  define __DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
 #endif
 #include <sys/mman.h>
@@ -836,22 +830,16 @@ struct arena_s {
  */
 
 #ifdef JEMALLOC_MUNMAP
 static const bool config_munmap = true;
 #else
 static const bool config_munmap = false;
 #endif
 
-#ifdef JEMALLOC_RECYCLE
-static const bool config_recycle = true;
-#else
-static const bool config_recycle = false;
-#endif
-
 /*
  * When MALLOC_STATIC_SIZES is defined most of the parameters
  * controlling the malloc behavior are defined as compile-time constants
  * for best performance and cannot be altered at runtime.
  */
 #if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) && !defined(__aarch64__)
 #define MALLOC_STATIC_SIZES 1
 #endif
@@ -2319,17 +2307,17 @@ chunk_recycle(extent_tree_t *chunks_szad
 		node->addr = (void *)((uintptr_t)(ret) + size);
 		node->size = trailsize;
 		node->zeroed = zeroed;
 		extent_tree_szad_insert(chunks_szad, node);
 		extent_tree_ad_insert(chunks_ad, node);
 		node = NULL;
 	}
 
-	if (config_munmap && config_recycle)
+	if (config_munmap)
 		recycled_size -= size;
 
 	malloc_mutex_unlock(&chunks_mtx);
 
 	if (node != NULL)
 		base_node_dealloc(node);
 #ifdef MALLOC_DECOMMIT
 	pages_commit(ret, size);
@@ -2367,17 +2355,17 @@ chunk_alloc(size_t size, size_t alignmen
 {
 	void *ret;
 
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 	assert(alignment != 0);
 	assert((alignment & chunksize_mask) == 0);
 
-	if (!config_munmap || (config_recycle && CAN_RECYCLE(size))) {
+	if (!config_munmap || CAN_RECYCLE(size)) {
 		ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap,
 			size, alignment, base, &zero);
 		if (ret != NULL)
 			goto RETURN;
 	}
 	ret = chunk_alloc_mmap(size, alignment);
 	if (ret != NULL) {
 		goto RETURN;
@@ -2468,17 +2456,17 @@ chunk_record(extent_tree_t *chunks_szad,
 		node->addr = prev->addr;
 		node->size += prev->size;
 		node->zeroed = (node->zeroed && prev->zeroed);
 		extent_tree_szad_insert(chunks_szad, node);
 
 		xprev = prev;
 	}
 
-	if (config_munmap && config_recycle)
+	if (config_munmap)
 		recycled_size += size;
 
 label_return:
 	malloc_mutex_unlock(&chunks_mtx);
 	/*
 	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
 	 * avoid potential deadlock.
 	 */
@@ -2486,17 +2474,17 @@ label_return:
 		base_node_dealloc(xnode);
 	if (xprev != NULL)
 		base_node_dealloc(xprev);
 }
 
 static bool
 chunk_dalloc_mmap(void *chunk, size_t size)
 {
-	if (!config_munmap || (config_recycle && CAN_RECYCLE(size) &&
+	if (!config_munmap || (CAN_RECYCLE(size) &&
 			load_acquire_z(&recycled_size) < recycle_limit))
 		return true;
 
 	pages_unmap(chunk, size);
 	return false;
 }
 
 #undef CAN_RECYCLE
@@ -5468,17 +5456,17 @@ jemalloc_purge_freed_pages_impl()
 	size_t i;
 	malloc_spin_lock(&arenas_lock);
 	for (i = 0; i < narenas; i++) {
 		arena_t *arena = arenas[i];
 		if (arena != NULL)
 			hard_purge_arena(arena);
 	}
 	malloc_spin_unlock(&arenas_lock);
-	if (!config_munmap || config_recycle) {
+	if (!config_munmap) {
 		malloc_mutex_lock(&chunks_mtx);
 		extent_node_t *node = extent_tree_szad_first(&chunks_szad_mmap);
 		while (node) {
 			pages_decommit(node->addr, node->size);
 			pages_commit(node->addr, node->size);
 			node->zeroed = true;
 			node = extent_tree_szad_next(&chunks_szad_mmap, node);
 		}