Bug 1229384 - Invert the meaning of the arena_ralloc_large and arena_t::RallocGrowLarge return type. r?njn draft
author Mike Hommey <mh+mozilla@glandium.org>
Fri, 27 Oct 2017 10:14:04 +0900
changeset 687287 1870cce57be80c2abd6153c792d4e66d8ff8466b
parent 687286 a1a1a3debc9a9df8ce90092a77c9d3bb9744b9f7
child 687288 d74b30dabe37ddd48f32101ddda61f167887913e
child 688089 c2e087a49f31b96ef61a69c82d3afa1356c8ac53
push id 86473
push user bmo:mh+mozilla@glandium.org
push date Fri, 27 Oct 2017 04:57:57 +0000
reviewers njn
bugs 1229384
milestone 58.0a1
Bug 1229384 - Invert the meaning of the arena_ralloc_large and arena_t::RallocGrowLarge return type. r?njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -3686,16 +3686,17 @@ arena_t::RallocShrinkLarge(arena_chunk_t
    * Shrink the run, and make trailing pages available for other
    * allocations.
    */
   MutexAutoLock lock(mLock);
   TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
   mStats.allocated_large -= aOldSize - aSize;
 }
 
+/* Returns whether reallocation was successful. */
 bool
 arena_t::RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
                          size_t aOldSize)
 {
   size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
   size_t npages = aOldSize >> pagesize_2pow;
 
   MutexAutoLock lock(mLock);
@@ -3716,54 +3717,55 @@ arena_t::RallocGrowLarge(arena_chunk_t* 
         false);
 
     aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE |
         CHUNK_MAP_ALLOCATED;
     aChunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
         CHUNK_MAP_ALLOCATED;
 
     mStats.allocated_large += aSize - aOldSize;
-    return false;
+    return true;
   }
 
-  return true;
+  return false;
 }
 
 /*
  * Try to resize a large allocation, in order to avoid copying.  This will
  * always fail if growing an object, and the following run is already in use.
+ * Returns whether reallocation was successful.
  */
 static bool
 arena_ralloc_large(void* aPtr, size_t aSize, size_t aOldSize)
 {
   size_t psize;
 
   psize = PAGE_CEILING(aSize);
   if (psize == aOldSize) {
     /* Same size class. */
     if (aSize < aOldSize) {
       memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
     }
-    return false;
+    return true;
   } else {
     arena_chunk_t* chunk;
     arena_t* arena;
 
     chunk = GetChunkForPtr(aPtr);
     arena = chunk->arena;
     MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
 
     if (psize < aOldSize) {
       /* Fill before shrinking in order avoid a race. */
       memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
       arena->RallocShrinkLarge(chunk, aPtr, psize, aOldSize);
-      return false;
+      return true;
     } else {
       bool ret = arena->RallocGrowLarge(chunk, aPtr, psize, aOldSize);
-      if (ret == false && opt_zero) {
+      if (ret && opt_zero) {
         memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
       }
       return ret;
     }
   }
 }
 
 static void*
@@ -3787,17 +3789,17 @@ arena_ralloc(void* aPtr, size_t aSize, s
     }
   } else if (aSize <= bin_maxclass) {
     if (aOldSize > small_max && aOldSize <= bin_maxclass &&
         pow2_ceil(aSize) == pow2_ceil(aOldSize)) {
       goto IN_PLACE; /* Same size class. */
     }
   } else if (aOldSize > bin_maxclass && aOldSize <= arena_maxclass) {
     MOZ_ASSERT(aSize > bin_maxclass);
-    if (arena_ralloc_large(aPtr, aSize, aOldSize) == false) {
+    if (arena_ralloc_large(aPtr, aSize, aOldSize)) {
       return aPtr;
     }
   }
 
   /*
    * If we get here, then aSize and aOldSize are different enough that we
    * need to move the object.  In that case, fall back to allocating new
    * space and copying.