--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -108,16 +108,17 @@
*/
#include "mozmemory_wrap.h"
#include "mozjemalloc.h"
#include "mozilla/Sprintf.h"
#include "mozilla/Likely.h"
#include "mozilla/DoublyLinkedList.h"
#include "mozilla/GuardObjects.h"
+#include "mozilla/UniquePtr.h"
/*
* On Linux, we use madvise(MADV_DONTNEED) to release memory back to the
* operating system. If we release 1MB of live pages with MADV_DONTNEED, our
* RSS will decrease by 1MB (almost) immediately.
*
* On Mac, we use madvise(MADV_FREE). Unlike MADV_DONTNEED on Linux, MADV_FREE
* on Mac doesn't cause the OS to release the specified pages immediately; the
@@ -1523,16 +1524,23 @@ base_node_alloc(void)
static void
base_node_dealloc(extent_node_t* aNode)
{
MutexAutoLock lock(base_mtx);
*(extent_node_t**)aNode = base_nodes;
base_nodes = aNode;
}
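+/*
+ * Deletion policy for the UniquePtr alias below: instead of free()ing the
+ * node, hand it back to the base-node free list via base_node_dealloc().
+ */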
+struct BaseNodeFreePolicy
+{
+ void operator()(extent_node_t* aPtr) { base_node_dealloc(aPtr); }
+};
+
+using UniqueBaseNode = mozilla::UniquePtr<extent_node_t, BaseNodeFreePolicy>;
+
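+/*
+ * A minimal usage sketch (Consume() stands in for a hypothetical consumer):
+ *
+ *   UniqueBaseNode node(base_node_alloc());
+ *   if (node) {
+ *     Consume(node.release()); // ownership transferred; destructor skipped
+ *   }
+ *
+ * If release() is never called, the destructor returns the node to the
+ * free list automatically.
+ */
+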
/*
* End Utility functions/macros.
*/
/******************************************************************************/
/*
* Begin chunk management functions.
*/
@@ -1925,41 +1933,37 @@ pages_purge(void *addr, size_t length, b
# undef JEMALLOC_MADV_ZEROS
# endif
#endif
}
static void*
chunk_recycle(size_t aSize, size_t aAlignment, bool* aZeroed)
{
- void* ret;
- extent_node_t* node;
extent_node_t key;
- size_t alloc_size, leadsize, trailsize;
- ChunkType chunk_type;
-
- alloc_size = aSize + aAlignment - chunksize;
+
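+ /* Recycled extents are chunksize-aligned and aAlignment is a multiple of
+  * chunksize, so at most aAlignment - chunksize leading bytes are needed to
+  * reach an aAlignment-aligned address within a recycled extent. */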
+ size_t alloc_size = aSize + aAlignment - chunksize;
/* Beware size_t wrap-around. */
if (alloc_size < aSize) {
return nullptr;
}
key.addr = nullptr;
key.size = alloc_size;
chunks_mtx.Lock();
- node = gChunksBySize.SearchOrNext(&key);
+ extent_node_t* node = gChunksBySize.SearchOrNext(&key);
if (!node) {
chunks_mtx.Unlock();
return nullptr;
}
- leadsize =
+ size_t leadsize =
ALIGNMENT_CEILING((uintptr_t)node->addr, aAlignment) - (uintptr_t)node->addr;
MOZ_ASSERT(node->size >= leadsize + aSize);
- trailsize = node->size - leadsize - aSize;
- ret = (void*)((uintptr_t)node->addr + leadsize);
- chunk_type = node->chunk_type;
+ size_t trailsize = node->size - leadsize - aSize;
+ void* ret = (void*)((uintptr_t)node->addr + leadsize);
+ ChunkType chunk_type = node->chunk_type;
if (aZeroed) {
*aZeroed = (chunk_type == ZEROED_CHUNK);
}
/* Remove node from the tree. */
gChunksBySize.Remove(node);
gChunksByAddress.Remove(node);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
@@ -2028,44 +2032,35 @@ chunk_recycle(size_t aSize, size_t aAlig
* (e.g. base_alloc).
* `aZeroed` is an out-parameter reporting whether the allocated memory is
* guaranteed to be full of zeroes. It can be omitted when the caller doesn't
* care about the result.
*/
static void*
chunk_alloc(size_t aSize, size_t aAlignment, bool aBase, bool* aZeroed)
{
- void* ret;
+ void* ret = nullptr;
MOZ_ASSERT(aSize != 0);
MOZ_ASSERT((aSize & chunksize_mask) == 0);
MOZ_ASSERT(aAlignment != 0);
MOZ_ASSERT((aAlignment & chunksize_mask) == 0);
// Base allocations can't be fulfilled by recycling because of
// possible deadlock or infinite recursion.
if (CAN_RECYCLE(aSize) && !aBase) {
ret = chunk_recycle(aSize, aAlignment, aZeroed);
- if (ret) {
- goto RETURN;
+ }
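+ /* Recycling was skipped or failed; map fresh memory, which the OS
+  * guarantees to be zeroed. */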
+ if (!ret) {
+ ret = chunk_alloc_mmap(aSize, aAlignment);
+ if (aZeroed) {
+ *aZeroed = true;
}
}
- ret = chunk_alloc_mmap(aSize, aAlignment);
- if (aZeroed) {
- *aZeroed = true;
- }
- if (ret) {
- goto RETURN;
- }
-
- /* All strategies for allocation failed. */
- ret = nullptr;
-RETURN:
-
- if (ret && aBase == false) {
+ if (ret && !aBase) {
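+ /* If recording the chunk in the radix tree fails (OOM), undo the
+  * allocation rather than return a chunk the tree doesn't know about. */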
if (!gChunkRTree.Set(ret, ret)) {
chunk_dealloc(ret, aSize, UNKNOWN_CHUNK);
return nullptr;
}
}
MOZ_ASSERT(CHUNK_ADDR2BASE(ret) == ret);
return ret;
@@ -2087,37 +2082,39 @@ chunk_ensure_zero(void* aPtr, size_t aSi
}
}
#endif
}
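+/*
+ * Record the freed run [aChunk, aChunk + aSize) in the recycle trees,
+ * coalescing it with adjacent recorded runs where possible.
+ */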
static void
chunk_record(void* aChunk, size_t aSize, ChunkType aType)
{
- extent_node_t *xnode, *node, *prev, *xprev, key;
+ extent_node_t key;
if (aType != ZEROED_CHUNK) {
if (pages_purge(aChunk, aSize, aType == HUGE_CHUNK)) {
aType = ZEROED_CHUNK;
}
}
/*
* Allocate a node before acquiring chunks_mtx even though it might not
* be needed, because base_node_alloc() may cause a new base chunk to
* be allocated, which could cause deadlock if chunks_mtx were already
* held.
*/
- xnode = base_node_alloc();
+ UniqueBaseNode xnode(base_node_alloc());
/* Use xprev to implement conditional deferred deallocation of prev. */
- xprev = nullptr;
-
- chunks_mtx.Lock();
+ UniqueBaseNode xprev;
+
+ /* Locals are destroyed in reverse order of declaration, so the
+  * MutexAutoLock below releases chunks_mtx before the destructors of xnode
+  * and xprev run; their deferred deallocations thus happen outside the
+  * lock, avoiding potential deadlock. */
+ MutexAutoLock lock(chunks_mtx);
key.addr = (void*)((uintptr_t)aChunk + aSize);
- node = gChunksByAddress.SearchOrNext(&key);
+ extent_node_t* node = gChunksByAddress.SearchOrNext(&key);
/* Try to coalesce forward. */
if (node && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within gChunksByAddress, so only
* remove/insert from/into gChunksBySize.
*/
gChunksBySize.Remove(node);
@@ -2131,29 +2128,28 @@ chunk_record(void* aChunk, size_t aSize,
/* Coalescing forward failed, so insert a new node. */
if (!xnode) {
/*
* base_node_alloc() failed, which is an exceedingly
* unlikely failure. Leak chunk; its pages have
* already been purged, so this is only a virtual
* memory leak.
*/
- goto label_return;
+ return;
}
- node = xnode;
- xnode = nullptr; /* Prevent deallocation below. */
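+ /* The trees below take ownership; release() keeps xnode's destructor
+  * from returning the node to the free list. */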
+ node = xnode.release();
node->addr = aChunk;
node->size = aSize;
node->chunk_type = aType;
gChunksByAddress.Insert(node);
gChunksBySize.Insert(node);
}
/* Try to coalesce backward. */
- prev = gChunksByAddress.Prev(node);
+ extent_node_t* prev = gChunksByAddress.Prev(node);
if (prev && (void*)((uintptr_t)prev->addr + prev->size) == aChunk) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within gChunksByAddress, so only
* remove/insert node from/into gChunksBySize.
*/
gChunksBySize.Remove(prev);
gChunksByAddress.Remove(prev);
@@ -2161,33 +2157,20 @@ chunk_record(void* aChunk, size_t aSize,
gChunksBySize.Remove(node);
node->addr = prev->addr;
node->size += prev->size;
if (node->chunk_type != prev->chunk_type) {
node->chunk_type = RECYCLED_CHUNK;
}
gChunksBySize.Insert(node);
- xprev = prev;
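+ /* Defer handing prev back to the free list until chunks_mtx is released,
+  * by parking it in xprev. */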
+ xprev.reset(prev);
}
recycled_size += aSize;
-
-label_return:
- chunks_mtx.Unlock();
- /*
- * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
- * avoid potential deadlock.
- */
- if (xnode) {
- base_node_dealloc(xnode);
- }
- if (xprev) {
- base_node_dealloc(xprev);
- }
}
static void
chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType)
{
MOZ_ASSERT(aChunk);
MOZ_ASSERT(CHUNK_ADDR2BASE(aChunk) == aChunk);
MOZ_ASSERT(aSize != 0);