Bug 1365460 - Remove JEMALLOC_USES_MAP_ALIGN. r?njn draft
author Mike Hommey <mh+mozilla@glandium.org>
Thu, 18 May 2017 11:30:52 +0900
changeset 580762 29ed98979183399fb012fc14a0799c69dab90986
parent 580761 b0b79cdcdc362151e253b8a72d82352307419fa7
child 580763 eebb11f55dbd279b9500e7ea05cd03a5f865f53c
push id 59654
push user bmo:mh+mozilla@glandium.org
push date Thu, 18 May 2017 22:44:22 +0000
reviewers njn
bugs 1365460, 457189
milestone 55.0a1
Bug 1365460 - Remove JEMALLOC_USES_MAP_ALIGN. r?njn

It's a Solaris-only optimization that was used as a workaround for an
infinite loop in pages_map (bug 457189). In the meantime, the way
pages_map works has changed in such a way that the infinite loop can't
happen anymore.

Specifically, the original problem was that pages_map would try to
allocate something larger than a chunk, then deallocate it, and
reallocate at a hinted aligned address near the address of the
now-deallocated mapping, hopefully getting an aligned chunk. But
Solaris would ignore the hint, so the chunk would never be aligned,
and the loop would retry forever.

What the code does now is over-allocate, find an aligned chunk within
the over-sized mapping, and deallocate what ends up not being needed,
which leaves no room for the original infinite loop. We can thus
remove the workaround and put Solaris on par with other Unix
platforms.
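
For illustration, a minimal sketch of the over-allocate-and-trim idea
described above. This is not the actual mozjemalloc code (the real
chunk_alloc_mmap first tries a plain mmap of exactly `size` and only
falls back to over-allocation when the result is misaligned); the
function name alloc_aligned_slow is hypothetical, error handling is
simplified, and MAP_ANON may be spelled MAP_ANONYMOUS depending on the
platform.

	#include <sys/mman.h>
	#include <stdint.h>
	#include <stddef.h>

	/* Hypothetical sketch: map size + alignment bytes, pick the first
	 * aligned address inside the mapping, and unmap the excess on both
	 * sides. No address hint is involved, so a platform that ignores
	 * hints (as Solaris did in bug 457189) cannot cause a retry loop. */
	static void *
	alloc_aligned_slow(size_t size, size_t alignment)
	{
		size_t alloc_size = size + alignment;
		void *map = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANON, -1, 0);
		if (map == MAP_FAILED)
			return (NULL);
		char *base = (char *)map;

		/* Distance from base to the next alignment boundary. */
		size_t lead = ((uintptr_t)base % alignment != 0)
		    ? alignment - (uintptr_t)base % alignment : 0;
		char *ret = base + lead;

		/* Trim the unused head and tail of the over-sized mapping. */
		if (lead != 0)
			munmap(base, lead);
		if (alloc_size - lead > size)
			munmap(ret + size, alloc_size - lead - size);
		return (ret);
	}

Because the aligned chunk is carved out of a single mapping, success no
longer depends on the kernel honoring an address hint, which is what
made the original pages_map retry loop unbounded on Solaris.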
memory/mozjemalloc/mozjemalloc.cpp
--- a/memory/mozjemalloc/mozjemalloc.cpp
+++ b/memory/mozjemalloc/mozjemalloc.cpp
@@ -307,20 +307,16 @@ void *_mmap(void *addr, size_t length, i
 #define munmap(a, l) syscall(SYS_munmap, a, l)
 #endif
 #endif
 
 #ifdef MOZ_MEMORY_DARWIN
 static pthread_key_t tlsIndex;
 #endif
 
-#if defined(MOZ_MEMORY_SOLARIS) && defined(MAP_ALIGN) && !defined(JEMALLOC_NEVER_USES_MAP_ALIGN)
-#define JEMALLOC_USES_MAP_ALIGN	 /* Required on Solaris 10. Might improve performance elsewhere. */
-#endif
-
 #ifdef MOZ_MEMORY_WINDOWS
    /* MSVC++ does not support C99 variable-length arrays. */
 #  define RB_NO_C99_VARARRAYS
 #endif
 #include "rb.h"
 
 #ifdef MOZ_DEBUG
    /* Disable inlining to make debugging easier. */
@@ -1756,37 +1752,16 @@ pages_unmap(void *addr, size_t size)
 	if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
 		_malloc_message(_getprogname(),
 		    ": (malloc) Error in VirtualFree()\n", "", "");
 		if (opt_abort)
 			moz_abort();
 	}
 }
 #else
-#ifdef JEMALLOC_USES_MAP_ALIGN
-static void *
-pages_map_align(size_t size, size_t alignment)
-{
-	void *ret;
-
-	/*
-	 * We don't use MAP_FIXED here, because it can cause the *replacement*
-	 * of existing mappings, and we only want to create new mappings.
-	 */
-	ret = mmap((void *)alignment, size, PROT_READ | PROT_WRITE,
-		MAP_PRIVATE | MAP_NOSYNC | MAP_ALIGN | MAP_ANON, -1, 0);
-	MOZ_ASSERT(ret != NULL);
-
-	if (ret == MAP_FAILED)
-		ret = NULL;
-	else
-		MozTagAnonymousMemory(ret, size, "jemalloc");
-	return (ret);
-}
-#endif
 
 static void *
 pages_map(void *addr, size_t size)
 {
 	void *ret;
 #if defined(__ia64__) || (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
         /*
          * The JS engine assumes that all allocated pointers have their high 17 bits clear,
@@ -2120,19 +2095,16 @@ chunk_alloc_mmap_slow(size_t size, size_
 
         MOZ_ASSERT(ret != NULL);
         return (ret);
 }
 
 static void *
 chunk_alloc_mmap(size_t size, size_t alignment)
 {
-#ifdef JEMALLOC_USES_MAP_ALIGN
-        return pages_map_align(size, alignment);
-#else
         void *ret;
         size_t offset;
 
         /*
          * Ideally, there would be a way to specify alignment to mmap() (like
          * NetBSD has), but in the absence of such a feature, we have to work
          * hard to efficiently create aligned mappings. The reliable, but
          * slow method is to create a mapping that is over-sized, then trim the
@@ -2150,17 +2122,16 @@ chunk_alloc_mmap(size_t size, size_t ali
         offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
         if (offset != 0) {
                 pages_unmap(ret, size);
                 return (chunk_alloc_mmap_slow(size, alignment));
         }
 
         MOZ_ASSERT(ret != NULL);
         return (ret);
-#endif
 }
 
 bool
 pages_purge(void *addr, size_t length)
 {
 	bool unzeroed;
 
 #ifdef MALLOC_DECOMMIT
@@ -4825,25 +4796,16 @@ MALLOC_OUT:
 	arena_chunk_header_npages = calculate_arena_header_pages();
 	arena_maxclass = calculate_arena_maxclass();
 
 	recycle_limit = CHUNK_RECYCLE_LIMIT * chunksize;
 #endif
 
 	recycled_size = 0;
 
-#ifdef JEMALLOC_USES_MAP_ALIGN
-	/*
-	 * When using MAP_ALIGN, the alignment parameter must be a power of two
-	 * multiple of the system pagesize, or mmap will fail.
-	 */
-	MOZ_ASSERT((chunksize % pagesize) == 0);
-	MOZ_ASSERT((1 << (ffs(chunksize / pagesize) - 1)) == (chunksize/pagesize));
-#endif
-
 	/* Various sanity checks that regard configuration. */
 	MOZ_ASSERT(quantum >= sizeof(void *));
 	MOZ_ASSERT(quantum <= pagesize);
 	MOZ_ASSERT(chunksize >= pagesize);
 	MOZ_ASSERT(quantum * 4 <= chunksize);
 
 	/* Initialize chunks data. */
 	malloc_mutex_init(&chunks_mtx);