Bug 1365460 - Remove JEMALLOC_MUNMAP/config_munmap, they are always true. r?njn
author Mike Hommey <mh+mozilla@glandium.org>
Thu, 18 May 2017 10:34:38 +0900
changeset 580756 bd5309c8f24f00b46262b76c82f616d0895214ce
parent 580755 d38a5cc8c044b6c9c80a38ec3de0b7a51732ab16
child 580757 26d72966dc3a9eac568156fbc18caccfc46b3842
push id 59654
push user bmo:mh+mozilla@glandium.org
push date Thu, 18 May 2017 22:44:22 +0000
reviewers njn
bugs 1365460
milestone 55.0a1
Bug 1365460 - Remove JEMALLOC_MUNMAP/config_munmap, they are always true. r?njn
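
JEMALLOC_MUNMAP was defined unconditionally, so config_munmap was a compile-time constant true: every "if (config_munmap)" block ran unconditionally and every "!config_munmap" branch was dead code, which is why the guards in the hunks below can simply be dropped. A minimal standalone sketch of the removed pattern (illustrative only; the identifiers come from the patch, the main() wrapper is added just for this sketch):

	#include <stdbool.h>
	#include <stdio.h>

	/* The macro was always defined... */
	#define JEMALLOC_MUNMAP

	/* ...so only the first branch was ever compiled. */
	#ifdef JEMALLOC_MUNMAP
	static const bool config_munmap = true;
	#else
	static const bool config_munmap = false;	/* unreachable */
	#endif

	int main(void)
	{
		/* Guards of the form "!config_munmap || X" reduce to "X",
		 * and "if (config_munmap) stmt;" reduces to "stmt;". */
		printf("config_munmap is always %s\n",
		    config_munmap ? "true" : "false");
		return 0;
	}
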
memory/mozjemalloc/mozjemalloc.cpp
--- a/memory/mozjemalloc/mozjemalloc.cpp
+++ b/memory/mozjemalloc/mozjemalloc.cpp
@@ -201,22 +201,16 @@ typedef unsigned long long uintmax_t;
 typedef long long ssize_t;
 #else
 typedef long ssize_t;
 #endif
 
 #define	MALLOC_DECOMMIT
 #endif
 
-/*
- * Allow unmapping pages on all platforms. Note that if this is disabled,
- * jemalloc will never unmap anything, instead recycling pages for later use.
- */
-#define JEMALLOC_MUNMAP
-
 #ifndef MOZ_MEMORY_WINDOWS
 #ifndef MOZ_MEMORY_SOLARIS
 #include <sys/cdefs.h>
 #endif
 #ifndef __DECONST
 #  define __DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
 #endif
 #include <sys/mman.h>
@@ -824,22 +818,16 @@ struct arena_s {
 	arena_bin_t		bins[1]; /* Dynamically sized. */
 };
 
 /******************************************************************************/
 /*
  * Data.
  */
 
-#ifdef JEMALLOC_MUNMAP
-static const bool config_munmap = true;
-#else
-static const bool config_munmap = false;
-#endif
-
 /*
  * When MALLOC_STATIC_SIZES is defined most of the parameters
  * controlling the malloc behavior are defined as compile-time constants
  * for best performance and cannot be altered at runtime.
  */
 #if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) && !defined(__aarch64__)
 #define MALLOC_STATIC_SIZES 1
 #endif
@@ -2307,18 +2295,17 @@ chunk_recycle(extent_tree_t *chunks_szad
 		node->addr = (void *)((uintptr_t)(ret) + size);
 		node->size = trailsize;
 		node->zeroed = zeroed;
 		extent_tree_szad_insert(chunks_szad, node);
 		extent_tree_ad_insert(chunks_ad, node);
 		node = NULL;
 	}
 
-	if (config_munmap)
-		recycled_size -= size;
+	recycled_size -= size;
 
 	malloc_mutex_unlock(&chunks_mtx);
 
 	if (node != NULL)
 		base_node_dealloc(node);
 #ifdef MALLOC_DECOMMIT
 	pages_commit(ret, size);
 #endif
@@ -2355,17 +2342,17 @@ chunk_alloc(size_t size, size_t alignmen
 {
 	void *ret;
 
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 	assert(alignment != 0);
 	assert((alignment & chunksize_mask) == 0);
 
-	if (!config_munmap || CAN_RECYCLE(size)) {
+	if (CAN_RECYCLE(size)) {
 		ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap,
 			size, alignment, base, &zero);
 		if (ret != NULL)
 			goto RETURN;
 	}
 	ret = chunk_alloc_mmap(size, alignment);
 	if (ret != NULL) {
 		goto RETURN;
@@ -2456,36 +2443,34 @@ chunk_record(extent_tree_t *chunks_szad,
 		node->addr = prev->addr;
 		node->size += prev->size;
 		node->zeroed = (node->zeroed && prev->zeroed);
 		extent_tree_szad_insert(chunks_szad, node);
 
 		xprev = prev;
 	}
 
-	if (config_munmap)
-		recycled_size += size;
+	recycled_size += size;
 
 label_return:
 	malloc_mutex_unlock(&chunks_mtx);
 	/*
 	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
 	 * avoid potential deadlock.
 	 */
 	if (xnode != NULL)
 		base_node_dealloc(xnode);
 	if (xprev != NULL)
 		base_node_dealloc(xprev);
 }
 
 static bool
 chunk_dalloc_mmap(void *chunk, size_t size)
 {
-	if (!config_munmap || (CAN_RECYCLE(size) &&
-			load_acquire_z(&recycled_size) < recycle_limit))
+	if (CAN_RECYCLE(size) && load_acquire_z(&recycled_size) < recycle_limit)
 		return true;
 
 	pages_unmap(chunk, size);
 	return false;
 }
 
 #undef CAN_RECYCLE
 
@@ -5456,27 +5441,16 @@ jemalloc_purge_freed_pages_impl()
 	size_t i;
 	malloc_spin_lock(&arenas_lock);
 	for (i = 0; i < narenas; i++) {
 		arena_t *arena = arenas[i];
 		if (arena != NULL)
 			hard_purge_arena(arena);
 	}
 	malloc_spin_unlock(&arenas_lock);
-	if (!config_munmap) {
-		malloc_mutex_lock(&chunks_mtx);
-		extent_node_t *node = extent_tree_szad_first(&chunks_szad_mmap);
-		while (node) {
-			pages_decommit(node->addr, node->size);
-			pages_commit(node->addr, node->size);
-			node->zeroed = true;
-			node = extent_tree_szad_next(&chunks_szad_mmap, node);
-		}
-		malloc_mutex_unlock(&chunks_mtx);
-	}
 }
 
 #else /* !defined MALLOC_DOUBLE_PURGE */
 
 MOZ_JEMALLOC_API void
 jemalloc_purge_freed_pages_impl()
 {
 	/* Do nothing. */