--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -110,16 +110,17 @@
#include "mozmemory_wrap.h"
#include "mozjemalloc.h"
#include "mozilla/Atomics.h"
#include "mozilla/DoublyLinkedList.h"
#include "mozilla/GuardObjects.h"
#include "mozilla/Likely.h"
#include "mozilla/Sprintf.h"
#include "mozilla/UniquePtr.h"
+#include "mozilla/Unused.h"
/*
* On Linux, we use madvise(MADV_DONTNEED) to release memory back to the
* operating system. If we release 1MB of live pages with MADV_DONTNEED, our
* RSS will decrease by 1MB (almost) immediately.
*
* On Mac, we use madvise(MADV_FREE). Unlike MADV_DONTNEED on Linux, MADV_FREE
* on Mac doesn't cause the OS to release the specified pages immediately; the
@@ -1021,17 +1022,17 @@ private:
void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
void DeallocChunk(arena_chunk_t* aChunk);
arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);
void DallocRun(arena_run_t* aRun, bool aDirty);
- void SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero);
+ MOZ_MUST_USE bool SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero);
void TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize);
void TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize, bool dirty);
inline void* MallocBinEasy(arena_bin_t* aBin, arena_run_t* aRun);
void* MallocBinHard(arena_bin_t* aBin);
@@ -1383,46 +1384,48 @@ pages_decommit(void* aAddr, size_t aSize
if (mmap(aAddr, aSize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) ==
MAP_FAILED) {
MOZ_CRASH();
}
MozTagAnonymousMemory(aAddr, aSize, "jemalloc-decommitted");
#endif
}
-static inline void
+/* Commit pages. Returns whether pages were committed. */
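+/* Callers must consume the result (enforced by MOZ_MUST_USE): either
+ * propagate the failure, as in `if (!pages_commit(p, sz)) return nullptr;`,
+ * or discard it explicitly when the commit is best-effort, as in
+ * `mozilla::Unused << pages_commit(p, sz);`. */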
+MOZ_MUST_USE static inline bool
pages_commit(void* aAddr, size_t aSize)
{
#ifdef XP_WIN
/*
* The region starting at addr may have been allocated in multiple calls
* to VirtualAlloc and recycled, so committing the entire region in one
* go may not be valid. However, since we allocate at least a chunk at a
* time, we may touch any region in chunksized increments.
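+   * For example, with 1 MiB chunks, committing 2.5 MiB starting 768 KiB
+   * into a chunk takes four VirtualAlloc calls: 256 KiB up to the chunk
+   * boundary, then 1 MiB, 1 MiB, and a final 256 KiB.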
*/
size_t pages_size = std::min(aSize, chunksize - GetChunkOffsetForPtr(aAddr));
while (aSize > 0) {
if (!VirtualAlloc(aAddr, pages_size, MEM_COMMIT, PAGE_READWRITE)) {
- MOZ_CRASH();
+ return false;
}
aAddr = (void*)((uintptr_t)aAddr + pages_size);
aSize -= pages_size;
pages_size = std::min(aSize, chunksize);
}
#else
if (mmap(aAddr,
aSize,
PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_PRIVATE | MAP_ANON,
-1,
0) == MAP_FAILED) {
- MOZ_CRASH();
+ return false;
}
MozTagAnonymousMemory(aAddr, aSize, "jemalloc");
#endif
+ return true;
}
static bool
base_pages_alloc(size_t minsize)
{
size_t csize;
size_t pminsize;
@@ -1468,32 +1471,37 @@ base_alloc(size_t aSize)
/* Allocate. */
ret = base_next_addr;
base_next_addr = (void*)((uintptr_t)base_next_addr + csize);
/* Make sure enough pages are committed for the new allocation. */
if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
void* pbase_next_addr = (void*)(PAGE_CEILING((uintptr_t)base_next_addr));
# ifdef MALLOC_DECOMMIT
- pages_commit(base_next_decommitted, (uintptr_t)pbase_next_addr -
- (uintptr_t)base_next_decommitted);
+ if (!pages_commit(base_next_decommitted,
+ (uintptr_t)pbase_next_addr -
+ (uintptr_t)base_next_decommitted)) {
+ return nullptr;
+ }
# endif
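+    /* Note: base_committed must be updated before base_next_decommitted
+     * moves, since the delta is measured against its old value. */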
    base_committed += (uintptr_t)pbase_next_addr -
        (uintptr_t)base_next_decommitted;
    base_next_decommitted = pbase_next_addr;
}
return ret;
}
static void*
base_calloc(size_t aNumber, size_t aSize)
{
void* ret = base_alloc(aNumber * aSize);
- memset(ret, 0, aNumber * aSize);
+ if (ret) {
+ memset(ret, 0, aNumber * aSize);
+ }
return ret;
}
static extent_node_t *
base_node_alloc(void)
{
extent_node_t *ret;
@@ -1990,17 +1998,19 @@ chunk_recycle(size_t aSize, size_t aAlig
gRecycledSize -= aSize;
chunks_mtx.Unlock();
if (node) {
base_node_dealloc(node);
}
#ifdef MALLOC_DECOMMIT
- pages_commit(ret, aSize);
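+  // If the commit fails, the recycled range was already removed from the
+  // tree above, so it stays reserved but unused; the caller is expected to
+  // fall back to mapping a fresh chunk.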
+ if (!pages_commit(ret, aSize)) {
+ return nullptr;
+ }
// pages_commit is guaranteed to zero the chunk.
if (aZeroed) {
*aZeroed = true;
}
#endif
return ret;
}
@@ -2405,17 +2415,17 @@ arena_run_reg_dalloc(arena_run_t *run, a
run->regs_minelm = elm;
bit = regind - (elm << (SIZEOF_INT_2POW + 3));
MOZ_DIAGNOSTIC_ASSERT((run->regs_mask[elm] & (1U << bit)) == 0);
run->regs_mask[elm] |= (1U << bit);
#undef SIZE_INV
#undef SIZE_INV_SHIFT
}
-void
+bool
arena_t::SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero)
{
arena_chunk_t* chunk;
size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
chunk = GetChunkForPtr(aRun);
old_ndirty = chunk->ndirty;
run_ind = (unsigned)((uintptr_t(aRun) - uintptr_t(chunk)) >> pagesize_2pow);
@@ -2446,22 +2456,27 @@ arena_t::SplitRun(arena_run_t* aRun, siz
MOZ_ASSERT(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
chunk->map[run_ind + i + j].bits &=
~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
}
# ifdef MALLOC_DECOMMIT
- pages_commit((void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
- j << pagesize_2pow);
- // pages_commit zeroes pages, so mark them as such. That's checked
- // further below to avoid manually zeroing the pages.
+ bool committed = pages_commit(
+ (void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
+ j << pagesize_2pow);
+ // pages_commit zeroes pages, so mark them as such if it succeeded.
+ // That's checked further below to avoid manually zeroing the pages.
for (size_t k = 0; k < j; k++) {
- chunk->map[run_ind + i + k].bits |= CHUNK_MAP_ZEROED;
+ chunk->map[run_ind + i + k].bits |=
+ committed ? CHUNK_MAP_ZEROED : CHUNK_MAP_DECOMMITTED;
+ }
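+      // When the commit failed, the pages were re-flagged as decommitted
+      // above; bail out so the failure propagates to the caller.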
+ if (!committed) {
+ return false;
}
# endif
mStats.committed += j;
}
}
mRunsAvail.Remove(&chunk->map[run_ind]);
@@ -2510,16 +2525,17 @@ arena_t::SplitRun(arena_run_t* aRun, siz
*/
if (aLarge) {
chunk->map[run_ind].bits |= aSize;
}
if (chunk->ndirty == 0 && old_ndirty > 0) {
mChunksDirty.Remove(chunk);
}
+ return true;
}
void
arena_t::InitChunk(arena_chunk_t* aChunk, bool aZeroed)
{
size_t i;
/* WARNING: The following relies on !aZeroed meaning "used to be an arena
* chunk".
@@ -2620,49 +2636,40 @@ arena_t::AllocRun(arena_bin_t* aBin, siz
key.bits = aSize | CHUNK_MAP_KEY;
mapelm = mRunsAvail.SearchOrNext(&key);
if (mapelm) {
arena_chunk_t* chunk = GetChunkForPtr(mapelm);
size_t pageind = (uintptr_t(mapelm) - uintptr_t(chunk->map)) /
sizeof(arena_chunk_map_t);
run = (arena_run_t*)(uintptr_t(chunk) + (pageind << pagesize_2pow));
- SplitRun(run, aSize, aLarge, aZero);
- return run;
- }
-
- if (mSpare) {
+ } else if (mSpare) {
/* Use the spare. */
arena_chunk_t* chunk = mSpare;
mSpare = nullptr;
run = (arena_run_t*)(uintptr_t(chunk) + (arena_chunk_header_npages << pagesize_2pow));
/* Insert the run into the tree of available runs. */
mRunsAvail.Insert(&chunk->map[arena_chunk_header_npages]);
- SplitRun(run, aSize, aLarge, aZero);
- return run;
- }
-
- /*
- * No usable runs. Create a new chunk from which to allocate
- * the run.
- */
- {
+ } else {
+ /*
+ * No usable runs. Create a new chunk from which to allocate
+ * the run.
+ */
bool zeroed;
arena_chunk_t* chunk = (arena_chunk_t*)
chunk_alloc(chunksize, chunksize, false, &zeroed);
if (!chunk) {
return nullptr;
}
InitChunk(chunk, zeroed);
run = (arena_run_t*)(uintptr_t(chunk) + (arena_chunk_header_npages << pagesize_2pow));
}
/* Update page map. */
- SplitRun(run, aSize, aLarge, aZero);
- return run;
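+  // SplitRun can now fail to commit pages, in which case the run is
+  // unusable and the allocation fails.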
+ return SplitRun(run, aSize, aLarge, aZero) ? run : nullptr;
}
void
arena_t::Purge(bool aAll)
{
arena_chunk_t* chunk;
size_t i, npages;
/* If all is set purge all dirty pages. */
@@ -3710,19 +3717,23 @@ arena_t::RallocGrowLarge(arena_chunk_t*
if (pageind + npages < chunk_npages && (aChunk->map[pageind+npages].bits
& CHUNK_MAP_ALLOCATED) == 0 && (aChunk->map[pageind+npages].bits &
~pagesize_mask) >= aSize - aOldSize) {
/*
* The next run is available and sufficiently large. Split the
* following run, then merge the first part with the existing
* allocation.
*/
- SplitRun((arena_run_t *)(uintptr_t(aChunk) +
- ((pageind+npages) << pagesize_2pow)), aSize - aOldSize, true,
- false);
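+  // SplitRun commits the pages of the following run and can fail; when it
+  // does, growing in place is impossible and the caller is expected to
+  // fall back to an allocate-copy-free realloc.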
+ if (!SplitRun((arena_run_t*)(uintptr_t(aChunk) +
+ ((pageind + npages) << pagesize_2pow)),
+ aSize - aOldSize,
+ true,
+ false)) {
+ return false;
+ }
aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
aChunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
mStats.allocated_large += aSize - aOldSize;
return true;
@@ -4091,17 +4102,20 @@ huge_ralloc(void* aPtr, size_t aSize, si
key.addr = const_cast<void*>(aPtr);
extent_node_t* node = huge.Search(&key);
MOZ_ASSERT(node);
MOZ_ASSERT(node->size == aOldSize);
huge_allocated -= aOldSize - psize;
/* No need to change huge_mapped, because we didn't (un)map anything. */
node->size = psize;
} else if (psize > aOldSize) {
- pages_commit((void*)((uintptr_t)aPtr + aOldSize), psize - aOldSize);
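+      // A failed commit of the grown tail now reports OOM to the caller;
+      // the original allocation is left untouched.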
+ if (!pages_commit((void*)((uintptr_t)aPtr + aOldSize),
+ psize - aOldSize)) {
+ return nullptr;
+ }
}
#endif
/* Although we don't have to commit or decommit anything if
* DECOMMIT is not defined and the size class didn't change, we
* do need to update the recorded size if the size increased,
* so malloc_usable_size doesn't return a value smaller than
* what was requested via realloc(). */
@@ -4805,18 +4819,18 @@ hard_purge_chunk(arena_chunk_t* aChunk)
aChunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
}
/* We could use mincore to find out which pages are actually
* present, but it's not clear that's better. */
if (npages > 0) {
pages_decommit(((char*)aChunk) + (i << pagesize_2pow),
npages << pagesize_2pow);
- pages_commit(((char*)aChunk) + (i << pagesize_2pow),
- npages << pagesize_2pow);
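+    // The decommit/commit cycle only forces the MADV_FREE'd pages out of
+    // memory; the commit is best-effort and a failure here is deliberately
+    // ignored rather than propagated.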
+ mozilla::Unused << pages_commit(((char*)aChunk) + (i << pagesize_2pow),
+ npages << pagesize_2pow);
}
i += npages;
}
}
/* Explicitly remove all of this arena's MADV_FREE'd pages from memory. */
void
arena_t::HardPurge()