Bug 1379139 - Instead of not recycling huge chunks, zero them. r?njn draft
authorMike Hommey <mh+mozilla@glandium.org>
Mon, 10 Jul 2017 06:41:17 +0900
changeset 606576 971fdf0be9ba8b65c4a30725483d7e96745f5a19
parent 605096 b84a4439260322c260fc272e3b8a3537b616d3f1
child 636796 d79adba7a99aefd5af1e5429d9b45d7de99e2ec7
push id 67730
push user bmo:mh+mozilla@glandium.org
push date Tue, 11 Jul 2017 03:59:46 +0000
reviewers njn
bugs 1379139
milestone 56.0a1
Bug 1379139 - Instead of not recycling huge chunks, zero them. r?njn

It turns out that not recycling some kinds of chunk can lead to the recycle queue being starved in some scenarios. When that happens, we end up mmap()ing new memory, which turns out to be significantly slower. So instead of not recycling huge chunks, we force-clean them before madvising, so that the pages can still be reclaimed in case of memory pressure.
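For orientation, the core of the change is "force-zero, then purge" rather than skipping recycling. Below is a minimal standalone sketch (not mozjemalloc code) of that pattern for the force_zero case; the names purge_and_zero and the mmap demo in main are hypothetical, and the real pages_purge in the diff below additionally handles MALLOC_DECOMMIT builds and the chunked VirtualAlloc(MEM_RESET) loop on Windows.

// Sketch for illustration only; not the mozjemalloc implementation.
#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

// Returns true if the region may still hold non-zero data afterwards,
// i.e. the equivalent of pages_purge()'s "unzeroed" result when
// force_zero is true.
static bool
purge_and_zero(void *addr, size_t length)
{
#ifdef __linux__
	// MADV_DONTNEED re-faults the pages as zero-filled, so no memset
	// is needed before the purge.
	int err = madvise(addr, length, MADV_DONTNEED);
	return err != 0;
#else
	// MADV_FREE (FreeBSD, Darwin) keeps the old contents until the
	// kernel actually reclaims the pages, so force-zero first; the
	// pages remain reclaimable under memory pressure either way.
	memset(addr, 0, length);
	int err = madvise(addr, length, MADV_FREE);
	return err != 0;
#endif
}

int main()
{
	// Demo: dirty a 1 MiB anonymous mapping, then purge it.
	const size_t len = 1 << 20;
	void *p = mmap(nullptr, len, PROT_READ | PROT_WRITE,
	               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 0xab, len);
	bool unzeroed = purge_and_zero(p, len);
	munmap(p, len);
	return unzeroed ? 1 : 0;
}

With this in place, chunk_record() no longer needs to refuse to record huge chunks; it simply asks pages_purge() to force-zero them, as the second hunk of the diff shows.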
memory/mozjemalloc/mozjemalloc.cpp
--- a/memory/mozjemalloc/mozjemalloc.cpp
+++ b/memory/mozjemalloc/mozjemalloc.cpp
@@ -1919,47 +1919,51 @@ chunk_alloc_mmap(size_t size, size_t ali
                 return (chunk_alloc_mmap_slow(size, alignment));
         }
 
         MOZ_ASSERT(ret);
         return (ret);
 }
 
 bool
-pages_purge(void *addr, size_t length)
+pages_purge(void *addr, size_t length, bool force_zero)
 {
 	bool unzeroed;
 
 #ifdef MALLOC_DECOMMIT
 	pages_decommit(addr, length);
 	unzeroed = false;
 #else
+#  ifndef MOZ_MEMORY_LINUX
+	if (force_zero)
+		memset(addr, 0, length);
+#  endif
 #  ifdef MOZ_MEMORY_WINDOWS
 	/*
 	* The region starting at addr may have been allocated in multiple calls
 	* to VirtualAlloc and recycled, so resetting the entire region in one
 	* go may not be valid. However, since we allocate at least a chunk at a
 	* time, we may touch any region in chunksized increments.
 	*/
 	size_t pages_size = std::min(length, chunksize -
 		CHUNK_ADDR2OFFSET((uintptr_t)addr));
 	while (length > 0) {
 		VirtualAlloc(addr, pages_size, MEM_RESET, PAGE_READWRITE);
 		addr = (void *)((uintptr_t)addr + pages_size);
 		length -= pages_size;
 		pages_size = std::min(length, chunksize);
 	}
-	unzeroed = true;
+	unzeroed = !force_zero;
 #  else
 #    ifdef MOZ_MEMORY_LINUX
 #      define JEMALLOC_MADV_PURGE MADV_DONTNEED
 #      define JEMALLOC_MADV_ZEROS true
 #    else /* FreeBSD and Darwin. */
 #      define JEMALLOC_MADV_PURGE MADV_FREE
-#      define JEMALLOC_MADV_ZEROS false
+#      define JEMALLOC_MADV_ZEROS force_zero
 #    endif
 	int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
 	unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
 #    undef JEMALLOC_MADV_PURGE
 #    undef JEMALLOC_MADV_ZEROS
 #  endif
 #endif
 	return (unzeroed);
@@ -2116,23 +2120,17 @@ RETURN:
 
 static void
 chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
     size_t size, enum ChunkType type)
 {
 	bool unzeroed;
 	extent_node_t *xnode, *node, *prev, *xprev, key;
 
-	unzeroed = pages_purge(chunk, size);
-
-	/* If purge doesn't zero the chunk, only record arena chunks or
-	 * previously recycled chunks. */
-	if (unzeroed && type != ARENA_CHUNK && type != RECYCLED_CHUNK) {
-		return;
-	}
+	unzeroed = pages_purge(chunk, size, type == HUGE_CHUNK);
 
 	/*
 	 * Allocate a node before acquiring chunks_mtx even though it might not
 	 * be needed, because base_node_alloc() may cause a new base chunk to
 	 * be allocated, which could cause deadlock if chunks_mtx were already
 	 * held.
 	 */
 	xnode = base_node_alloc();