Bug 1411786 - Don't call chunk_recycle for base allocations. r?njn [draft]
author Mike Hommey <mh+mozilla@glandium.org>
Thu, 26 Oct 2017 08:24:08 +0900
changeset 686641 b7eae144e1290db3bce111589086dde2c2b5456d
parent 686640 886593c39794c7afbd9fe8ef0090f44c45dedb26
child 686642 6cb10204ba6a1e1209faa4920038e817b844d943
push id 86234
push user bmo:mh+mozilla@glandium.org
push date Thu, 26 Oct 2017 05:06:52 +0000
reviewers njn
bugs 1411786
milestone 58.0a1
Bug 1411786 - Don't call chunk_recycle for base allocations. r?njn

Instead of calling it with a boolean indicating whether the call was for base allocations or not, and returning immediately if it was, avoid the call altogether.
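For illustration only, not part of the patch: a minimal, self-contained C++ sketch of the pattern this change applies, hoisting a special-case guard out of the callee and into the caller. The names recycle and alloc are hypothetical stand-ins for chunk_recycle and chunk_alloc, and their bodies are stubs.

#include <cstddef>

// Hypothetical stand-in for chunk_recycle after the patch: the `base`
// flag is gone, so the function no longer has to bail out itself.
static void*
recycle(std::size_t size)
{
	// ... search the recycled-chunk trees for a fit (elided) ...
	(void)size;
	return nullptr; /* nothing recycled in this sketch */
}

// Hypothetical stand-in for chunk_alloc: the caller now owns the
// special case, so the callee is never entered for base allocations.
static void*
alloc(std::size_t size, bool base)
{
	// Base allocations can't be fulfilled by recycling because of
	// possible deadlock or infinite recursion.
	if (!base) {
		if (void* ret = recycle(size)) {
			return ret;
		}
	}
	// ... fall back to a fresh mapping (elided) ...
	return nullptr;
}

int
main()
{
	return alloc(4096, /* base */ true) ? 1 : 0;
}

Hoisting the guard keeps the recycling function's contract simple (it always attempts recycling) and makes the base-allocation constraint visible at the one call site that cares about it.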
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -1925,34 +1925,24 @@ pages_purge(void *addr, size_t length, b
 #    undef JEMALLOC_MADV_ZEROS
 #  endif
 #endif
 }
 
 static void *
 chunk_recycle(RedBlackTree<extent_node_t, ExtentTreeSzTrait>* chunks_szad,
               RedBlackTree<extent_node_t, ExtentTreeTrait>* chunks_ad,
-              size_t size, size_t alignment, bool base, bool *zeroed)
+              size_t size, size_t alignment, bool *zeroed)
 {
 	void *ret;
 	extent_node_t *node;
 	extent_node_t key;
 	size_t alloc_size, leadsize, trailsize;
 	ChunkType chunk_type;
 
-	if (base) {
-		/*
-		 * This function may need to call base_node_{,de}alloc(), but
-		 * the current chunk allocation request is on behalf of the
-		 * base allocator.  Avoid deadlock (and if that weren't an
-		 * issue, potential for infinite recursion) by returning nullptr.
-		 */
-		return nullptr;
-	}
-
 	alloc_size = size + alignment - chunksize;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return nullptr;
 	key.addr = nullptr;
 	key.size = alloc_size;
 	chunks_mtx.Lock();
 	node = chunks_szad->SearchOrNext(&key);
@@ -2045,19 +2035,21 @@ chunk_alloc(size_t size, size_t alignmen
 {
 	void *ret;
 
 	MOZ_ASSERT(size != 0);
 	MOZ_ASSERT((size & chunksize_mask) == 0);
 	MOZ_ASSERT(alignment != 0);
 	MOZ_ASSERT((alignment & chunksize_mask) == 0);
 
-	if (CAN_RECYCLE(size)) {
+	// Base allocations can't be fulfilled by recycling because of
+	// possible deadlock or infinite recursion.
+	if (CAN_RECYCLE(size) && !base) {
 		ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap,
-			size, alignment, base, zeroed);
+			size, alignment, zeroed);
 		if (ret)
 			goto RETURN;
 	}
 	ret = chunk_alloc_mmap(size, alignment);
 	if (zeroed)
 		*zeroed = true;
 	if (ret) {
 		goto RETURN;