Bug 1411786 - Rename chunk_{recycle,record,alloc,dealloc} arguments. r?njn
author Mike Hommey <mh+mozilla@glandium.org>
Thu, 26 Oct 2017 08:48:18 +0900
changeset 686644 5020684d66ccfe6dd3ef4fe0a23403f551146a80
parent 686643 37bf3caa9a0e60f4b50d10992961365411a71761
child 686645 124d2e277a55cb09dc499c5c9c0ce9eb0cc327d0
push id 86234
push user bmo:mh+mozilla@glandium.org
push date Thu, 26 Oct 2017 05:06:52 +0000
reviewers njn
bugs 1411786
milestone 58.0a1
Bug 1411786 - Rename chunk_{recycle,record,alloc,dealloc} arguments. r?njn
Also reformat a few things clang-format left out.
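
For context, the rename applies the Gecko convention of prefixing function arguments with "a". The two declarations below only illustrate that pattern (the "_before"/"_after" names are hypothetical and not part of the patch); the actual changes are in the diff that follows.

    // Illustration of the naming convention only -- not part of the patch.
    // Before: bare argument names.
    static void* chunk_alloc_before(size_t size, size_t alignment, bool base,
                                    bool* zeroed = nullptr);
    // After: Gecko-style "a"-prefixed argument names, as applied below.
    static void* chunk_alloc_after(size_t aSize, size_t aAlignment, bool aBase,
                                   bool* aZeroed = nullptr);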
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -1167,18 +1167,18 @@ static const bool	opt_zero = false;
 
 static size_t	opt_dirty_max = DIRTY_MAX_DEFAULT;
 
 /******************************************************************************/
 /*
  * Begin forward declarations.
  */
 
-static void	*chunk_alloc(size_t size, size_t alignment, bool base, bool *zeroed=nullptr);
-static void	chunk_dealloc(void *chunk, size_t size, ChunkType chunk_type);
+static void* chunk_alloc(size_t aSize, size_t aAlignment, bool aBase, bool* aZeroed = nullptr);
+static void chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType);
 static void chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed);
 static arena_t	*arenas_extend();
 static void	*huge_malloc(size_t size, bool zero);
 static void* huge_palloc(size_t aSize, size_t aAlignment, bool aZero);
 static void* huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize);
 static void huge_dalloc(void* aPtr);
 #ifdef XP_WIN
 extern "C"
@@ -1923,95 +1923,97 @@ pages_purge(void *addr, size_t length, b
 	return JEMALLOC_MADV_ZEROS && err == 0;
 #    undef JEMALLOC_MADV_PURGE
 #    undef JEMALLOC_MADV_ZEROS
 #  endif
 #endif
 }
 
 static void*
-chunk_recycle(size_t size, size_t alignment, bool* zeroed)
+chunk_recycle(size_t aSize, size_t aAlignment, bool* aZeroed)
 {
   void* ret;
   extent_node_t* node;
   extent_node_t key;
   size_t alloc_size, leadsize, trailsize;
   ChunkType chunk_type;
 
-  alloc_size = size + alignment - chunksize;
+  alloc_size = aSize + aAlignment - chunksize;
   /* Beware size_t wrap-around. */
-  if (alloc_size < size)
+  if (alloc_size < aSize) {
     return nullptr;
+  }
   key.addr = nullptr;
   key.size = alloc_size;
   chunks_mtx.Lock();
   node = gChunksBySize.SearchOrNext(&key);
   if (!node) {
     chunks_mtx.Unlock();
     return nullptr;
   }
   leadsize =
-    ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) - (uintptr_t)node->addr;
-  MOZ_ASSERT(node->size >= leadsize + size);
-  trailsize = node->size - leadsize - size;
+    ALIGNMENT_CEILING((uintptr_t)node->addr, aAlignment) - (uintptr_t)node->addr;
+  MOZ_ASSERT(node->size >= leadsize + aSize);
+  trailsize = node->size - leadsize - aSize;
   ret = (void*)((uintptr_t)node->addr + leadsize);
   chunk_type = node->chunk_type;
-  if (zeroed) {
-    *zeroed = (chunk_type == ZEROED_CHUNK);
+  if (aZeroed) {
+    *aZeroed = (chunk_type == ZEROED_CHUNK);
   }
   /* Remove node from the tree. */
   gChunksBySize.Remove(node);
   gChunksByAddress.Remove(node);
   if (leadsize != 0) {
     /* Insert the leading space as a smaller chunk. */
     node->size = leadsize;
     gChunksBySize.Insert(node);
     gChunksByAddress.Insert(node);
     node = nullptr;
   }
   if (trailsize != 0) {
     /* Insert the trailing space as a smaller chunk. */
     if (!node) {
       /*
-			 * An additional node is required, but
-			 * base_node_alloc() can cause a new base chunk to be
-			 * allocated.  Drop chunks_mtx in order to avoid
-			 * deadlock, and if node allocation fails, deallocate
-			 * the result before returning an error.
-			 */
+       * An additional node is required, but
+       * base_node_alloc() can cause a new base chunk to be
+       * allocated.  Drop chunks_mtx in order to avoid
+       * deadlock, and if node allocation fails, deallocate
+       * the result before returning an error.
+       */
       chunks_mtx.Unlock();
       node = base_node_alloc();
       if (!node) {
-        chunk_dealloc(ret, size, chunk_type);
+        chunk_dealloc(ret, aSize, chunk_type);
         return nullptr;
       }
       chunks_mtx.Lock();
     }
-    node->addr = (void*)((uintptr_t)(ret) + size);
+    node->addr = (void*)((uintptr_t)(ret) + aSize);
     node->size = trailsize;
     node->chunk_type = chunk_type;
     gChunksBySize.Insert(node);
     gChunksByAddress.Insert(node);
     node = nullptr;
   }
 
-  recycled_size -= size;
+  recycled_size -= aSize;
 
   chunks_mtx.Unlock();
 
-  if (node)
+  if (node) {
     base_node_dealloc(node);
+  }
 #ifdef MALLOC_DECOMMIT
-  pages_commit(ret, size);
+  pages_commit(ret, aSize);
   // pages_commit is guaranteed to zero the chunk.
-  if (zeroed) {
-    *zeroed = true;
+  if (aZeroed) {
+    *aZeroed = true;
   }
 #endif
-  return (ret);
+  return ret;
 }
 
 #ifdef XP_WIN
 /*
  * On Windows, calls to VirtualAlloc and VirtualFree must be matched, making it
  * awkward to recycle allocations of varying sizes. Therefore we only allow
  * recycling when the size equals the chunksize, unless deallocation is entirely
  * disabled.
@@ -2024,52 +2026,54 @@ chunk_recycle(size_t size, size_t alignm
 /* Allocates `size` bytes of system memory aligned for `alignment`.
  * `base` indicates whether the memory will be used for the base allocator
  * (e.g. base_alloc).
  * `zeroed` is an outvalue that returns whether the allocated memory is
  * guaranteed to be full of zeroes. It can be omitted when the caller doesn't
  * care about the result.
  */
 static void*
-chunk_alloc(size_t size, size_t alignment, bool base, bool* zeroed)
+chunk_alloc(size_t aSize, size_t aAlignment, bool aBase, bool* aZeroed)
 {
   void* ret;
 
-  MOZ_ASSERT(size != 0);
-  MOZ_ASSERT((size & chunksize_mask) == 0);
-  MOZ_ASSERT(alignment != 0);
-  MOZ_ASSERT((alignment & chunksize_mask) == 0);
+  MOZ_ASSERT(aSize != 0);
+  MOZ_ASSERT((aSize & chunksize_mask) == 0);
+  MOZ_ASSERT(aAlignment != 0);
+  MOZ_ASSERT((aAlignment & chunksize_mask) == 0);
 
   // Base allocations can't be fulfilled by recycling because of
   // possible deadlock or infinite recursion.
-  if (CAN_RECYCLE(size) && !base) {
-    ret = chunk_recycle(size, alignment, zeroed);
-    if (ret)
+  if (CAN_RECYCLE(aSize) && !aBase) {
+    ret = chunk_recycle(aSize, aAlignment, aZeroed);
+    if (ret) {
       goto RETURN;
+    }
   }
-  ret = chunk_alloc_mmap(size, alignment);
-  if (zeroed)
-    *zeroed = true;
+  ret = chunk_alloc_mmap(aSize, aAlignment);
+  if (aZeroed) {
+    *aZeroed = true;
+  }
   if (ret) {
     goto RETURN;
   }
 
   /* All strategies for allocation failed. */
   ret = nullptr;
 RETURN:
 
-  if (ret && base == false) {
+  if (ret && aBase == false) {
     if (!gChunkRTree.Set(ret, ret)) {
-      chunk_dealloc(ret, size, UNKNOWN_CHUNK);
+      chunk_dealloc(ret, aSize, UNKNOWN_CHUNK);
       return nullptr;
     }
   }
 
   MOZ_ASSERT(CHUNK_ADDR2BASE(ret) == ret);
-  return (ret);
+  return ret;
 }
 
 static void
 chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed)
 {
   if (aZeroed == false) {
     memset(aPtr, 0, aSize);
   }
@@ -2081,139 +2085,140 @@ chunk_ensure_zero(void* aPtr, size_t aSi
     for (i = 0; i < aSize / sizeof(size_t); i++) {
       MOZ_ASSERT(p[i] == 0);
     }
   }
 #endif
 }
 
 static void
-chunk_record(void* chunk, size_t size, ChunkType chunk_type)
+chunk_record(void* aChunk, size_t aSize, ChunkType aType)
 {
   extent_node_t *xnode, *node, *prev, *xprev, key;
 
-  if (chunk_type != ZEROED_CHUNK) {
-    if (pages_purge(chunk, size, chunk_type == HUGE_CHUNK)) {
-      chunk_type = ZEROED_CHUNK;
+  if (aType != ZEROED_CHUNK) {
+    if (pages_purge(aChunk, aSize, aType == HUGE_CHUNK)) {
+      aType = ZEROED_CHUNK;
     }
   }
 
   /*
-	 * Allocate a node before acquiring chunks_mtx even though it might not
-	 * be needed, because base_node_alloc() may cause a new base chunk to
-	 * be allocated, which could cause deadlock if chunks_mtx were already
-	 * held.
-	 */
+   * Allocate a node before acquiring chunks_mtx even though it might not
+   * be needed, because base_node_alloc() may cause a new base chunk to
+   * be allocated, which could cause deadlock if chunks_mtx were already
+   * held.
+   */
   xnode = base_node_alloc();
   /* Use xprev to implement conditional deferred deallocation of prev. */
   xprev = nullptr;
 
   chunks_mtx.Lock();
-  key.addr = (void*)((uintptr_t)chunk + size);
+  key.addr = (void*)((uintptr_t)aChunk + aSize);
   node = gChunksByAddress.SearchOrNext(&key);
   /* Try to coalesce forward. */
   if (node && node->addr == key.addr) {
     /*
-		 * Coalesce chunk with the following address range.  This does
-		 * not change the position within chunks_ad, so only
-		 * remove/insert from/into chunks_szad.
-		 */
+     * Coalesce chunk with the following address range.  This does
+     * not change the position within gChunksByAddress, so only
+     * remove/insert from/into gChunksBySize.
+     */
     gChunksBySize.Remove(node);
-    node->addr = chunk;
-    node->size += size;
-    if (node->chunk_type != chunk_type) {
+    node->addr = aChunk;
+    node->size += aSize;
+    if (node->chunk_type != aType) {
       node->chunk_type = RECYCLED_CHUNK;
     }
     gChunksBySize.Insert(node);
   } else {
     /* Coalescing forward failed, so insert a new node. */
     if (!xnode) {
       /*
-			 * base_node_alloc() failed, which is an exceedingly
-			 * unlikely failure.  Leak chunk; its pages have
-			 * already been purged, so this is only a virtual
-			 * memory leak.
-			 */
+       * base_node_alloc() failed, which is an exceedingly
+       * unlikely failure.  Leak chunk; its pages have
+       * already been purged, so this is only a virtual
+       * memory leak.
+       */
       goto label_return;
     }
     node = xnode;
     xnode = nullptr; /* Prevent deallocation below. */
-    node->addr = chunk;
-    node->size = size;
-    node->chunk_type = chunk_type;
+    node->addr = aChunk;
+    node->size = aSize;
+    node->chunk_type = aType;
     gChunksByAddress.Insert(node);
     gChunksBySize.Insert(node);
   }
 
   /* Try to coalesce backward. */
   prev = gChunksByAddress.Prev(node);
-  if (prev && (void*)((uintptr_t)prev->addr + prev->size) == chunk) {
+  if (prev && (void*)((uintptr_t)prev->addr + prev->size) == aChunk) {
     /*
-		 * Coalesce chunk with the previous address range.  This does
-		 * not change the position within chunks_ad, so only
-		 * remove/insert node from/into chunks_szad.
-		 */
+     * Coalesce chunk with the previous address range.  This does
+     * not change the position within gChunksByAddress, so only
+     * remove/insert node from/into gChunksBySize.
+     */
     gChunksBySize.Remove(prev);
     gChunksByAddress.Remove(prev);
 
     gChunksBySize.Remove(node);
     node->addr = prev->addr;
     node->size += prev->size;
     if (node->chunk_type != prev->chunk_type) {
       node->chunk_type = RECYCLED_CHUNK;
     }
     gChunksBySize.Insert(node);
 
     xprev = prev;
   }
 
-  recycled_size += size;
+  recycled_size += aSize;
 
 label_return:
   chunks_mtx.Unlock();
   /*
-	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
-	 * avoid potential deadlock.
-	 */
-  if (xnode)
+   * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
+   * avoid potential deadlock.
+   */
+  if (xnode) {
     base_node_dealloc(xnode);
-  if (xprev)
+  }
+  if (xprev) {
     base_node_dealloc(xprev);
+  }
 }
 
 static void
-chunk_dealloc(void* chunk, size_t size, ChunkType type)
+chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType)
 {
-
-  MOZ_ASSERT(chunk);
-  MOZ_ASSERT(CHUNK_ADDR2BASE(chunk) == chunk);
-  MOZ_ASSERT(size != 0);
-  MOZ_ASSERT((size & chunksize_mask) == 0);
-
-  gChunkRTree.Unset(chunk);
-
-  if (CAN_RECYCLE(size)) {
+  MOZ_ASSERT(aChunk);
+  MOZ_ASSERT(CHUNK_ADDR2BASE(aChunk) == aChunk);
+  MOZ_ASSERT(aSize != 0);
+  MOZ_ASSERT((aSize & chunksize_mask) == 0);
+
+  gChunkRTree.Unset(aChunk);
+
+  if (CAN_RECYCLE(aSize)) {
     size_t recycled_so_far = load_acquire_z(&recycled_size);
     // In case some race condition put us above the limit.
     if (recycled_so_far < recycle_limit) {
       size_t recycle_remaining = recycle_limit - recycled_so_far;
       size_t to_recycle;
-      if (size > recycle_remaining) {
+      if (aSize > recycle_remaining) {
         to_recycle = recycle_remaining;
         // Drop pages that would overflow the recycle limit
-        pages_trim(chunk, size, 0, to_recycle);
+        pages_trim(aChunk, aSize, 0, to_recycle);
       } else {
-        to_recycle = size;
+        to_recycle = aSize;
       }
-      chunk_record(chunk, to_recycle, type);
+      chunk_record(aChunk, to_recycle, aType);
       return;
     }
   }
 
-  pages_unmap(chunk, size);
+  pages_unmap(aChunk, aSize);
 }
 
 #undef CAN_RECYCLE
 
 /*
  * End chunk management functions.
  */
 /******************************************************************************/
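
For readers skimming the diff, the recycle-limit branch at the end of chunk_dealloc() can be summarized by the stand-alone sketch below. This is not mozjemalloc code: the globals, the 128 MiB figure and the stub helpers are placeholders standing in for recycled_size, recycle_limit, pages_trim, chunk_record and pages_unmap, and the assertions and gChunkRTree bookkeeping are omitted.

    // Stand-alone sketch of the recycle-limit logic in chunk_dealloc() above.
    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> gRecycledSize{0};            // placeholder for recycled_size
    const size_t kRecycleLimit = 128 * 1024 * 1024;  // placeholder limit, in bytes

    void TrimPages(void*, size_t, size_t, size_t) {} // placeholder for pages_trim
    void RecordChunk(void*, size_t) {}               // placeholder for chunk_record
    void UnmapPages(void*, size_t) {}                // placeholder for pages_unmap

    void DeallocChunk(void* aChunk, size_t aSize)
    {
      size_t recycledSoFar = gRecycledSize.load(std::memory_order_acquire);
      if (recycledSoFar < kRecycleLimit) {
        // Recycle only up to the remaining budget; drop the pages that would
        // overflow the limit before recording the chunk for reuse.
        size_t remaining = kRecycleLimit - recycledSoFar;
        size_t toRecycle = aSize > remaining ? remaining : aSize;
        if (toRecycle < aSize) {
          TrimPages(aChunk, aSize, 0, toRecycle);
        }
        RecordChunk(aChunk, toRecycle);
        return;
      }
      // Already at or over the recycle limit: return the memory to the OS.
      UnmapPages(aChunk, aSize);
    }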