--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -1356,69 +1356,73 @@ static inline const char *
{
return ("<jemalloc>");
}
/******************************************************************************/
static inline void
-pages_decommit(void *addr, size_t size)
+pages_decommit(void* aAddr, size_t aSize)
{
-
#ifdef XP_WIN
- /*
- * The region starting at addr may have been allocated in multiple calls
- * to VirtualAlloc and recycled, so decommitting the entire region in one
- * go may not be valid. However, since we allocate at least a chunk at a
- * time, we may touch any region in chunksized increments.
- */
- size_t pages_size = std::min(size, chunksize -
- GetChunkOffsetForPtr(addr));
- while (size > 0) {
- if (!VirtualFree(addr, pages_size, MEM_DECOMMIT))
- MOZ_CRASH();
- addr = (void *)((uintptr_t)addr + pages_size);
- size -= pages_size;
- pages_size = std::min(size, chunksize);
- }
+ /*
+   * The region starting at aAddr may have been allocated in multiple calls
+ * to VirtualAlloc and recycled, so decommitting the entire region in one
+ * go may not be valid. However, since we allocate at least a chunk at a
+ * time, we may touch any region in chunksized increments.
+ */
+ size_t pages_size = std::min(aSize, chunksize - GetChunkOffsetForPtr(aAddr));
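+  /* The first iteration covers at most the remainder of the current chunk;
+   * each later iteration advances one full chunk. */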
+ while (aSize > 0) {
+ if (!VirtualFree(aAddr, pages_size, MEM_DECOMMIT)) {
+ MOZ_CRASH();
+ }
+ aAddr = (void*)((uintptr_t)aAddr + pages_size);
+ aSize -= pages_size;
+ pages_size = std::min(aSize, chunksize);
+ }
#else
- if (mmap(addr, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1,
- 0) == MAP_FAILED)
- MOZ_CRASH();
- MozTagAnonymousMemory(addr, size, "jemalloc-decommitted");
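+  /* Remapping the range PROT_NONE with MAP_FIXED atomically replaces the
+   * existing mapping, discarding the old anonymous pages and returning
+   * the memory to the kernel. */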
+ if (mmap(aAddr, aSize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) ==
+ MAP_FAILED) {
+ MOZ_CRASH();
+ }
+ MozTagAnonymousMemory(aAddr, aSize, "jemalloc-decommitted");
#endif
}
static inline void
-pages_commit(void *addr, size_t size)
+pages_commit(void* aAddr, size_t aSize)
{
-
-# ifdef XP_WIN
- /*
- * The region starting at addr may have been allocated in multiple calls
- * to VirtualAlloc and recycled, so committing the entire region in one
- * go may not be valid. However, since we allocate at least a chunk at a
- * time, we may touch any region in chunksized increments.
- */
- size_t pages_size = std::min(size, chunksize -
- GetChunkOffsetForPtr(addr));
- while (size > 0) {
- if (!VirtualAlloc(addr, pages_size, MEM_COMMIT, PAGE_READWRITE))
- MOZ_CRASH();
- addr = (void *)((uintptr_t)addr + pages_size);
- size -= pages_size;
- pages_size = std::min(size, chunksize);
- }
-# else
- if (mmap(addr, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE |
- MAP_ANON, -1, 0) == MAP_FAILED)
- MOZ_CRASH();
- MozTagAnonymousMemory(addr, size, "jemalloc");
-# endif
+#ifdef XP_WIN
+ /*
+   * The region starting at aAddr may have been allocated in multiple calls
+ * to VirtualAlloc and recycled, so committing the entire region in one
+ * go may not be valid. However, since we allocate at least a chunk at a
+ * time, we may touch any region in chunksized increments.
+ */
+ size_t pages_size = std::min(aSize, chunksize - GetChunkOffsetForPtr(aAddr));
+ while (aSize > 0) {
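+    /* Committing can legitimately fail, e.g. when the system is out of
+     * commit space; pages_commit returns void, so there is no way to
+     * report failure and crashing is the only option. */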
+ if (!VirtualAlloc(aAddr, pages_size, MEM_COMMIT, PAGE_READWRITE)) {
+ MOZ_CRASH();
+ }
+ aAddr = (void*)((uintptr_t)aAddr + pages_size);
+ aSize -= pages_size;
+ pages_size = std::min(aSize, chunksize);
+ }
+#else
+ if (mmap(aAddr,
+ aSize,
+ PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANON,
+ -1,
+ 0) == MAP_FAILED) {
+ MOZ_CRASH();
+ }
+ MozTagAnonymousMemory(aAddr, aSize, "jemalloc");
+#endif
}
static bool
base_pages_alloc(size_t minsize)
{
size_t csize;
size_t pminsize;
@@ -1475,25 +1479,22 @@ base_alloc(size_t aSize)
  base_committed += (uintptr_t)pbase_next_addr -
                    (uintptr_t)base_next_decommitted;
  base_next_decommitted = pbase_next_addr;
}
return ret;
}
-static void *
-base_calloc(size_t number, size_t size)
+static void*
+base_calloc(size_t aNumber, size_t aSize)
{
- void *ret;
-
- ret = base_alloc(number * size);
- memset(ret, 0, number * size);
-
- return (ret);
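+  /* All callers are internal and pass small, bounded sizes, so the
+   * multiplication is assumed not to overflow. base_alloc can return
+   * nullptr on OOM, hence the null check before zeroing. */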
+  void* ret = base_alloc(aNumber * aSize);
+  if (ret) {
+    memset(ret, 0, aNumber * aSize);
+  }
+  return ret;
}
static extent_node_t *
base_node_alloc(void)
{
extent_node_t *ret;
base_mtx.Lock();
@@ -3726,51 +3727,48 @@ arena_t::RallocGrowLarge(arena_chunk_t*
return true;
}
/*
* Try to resize a large allocation, in order to avoid copying. This will
 * always fail when growing an object if the following run is already in use.
*/
static bool
-arena_ralloc_large(void *ptr, size_t size, size_t oldsize)
+arena_ralloc_large(void* aPtr, size_t aSize, size_t aOldSize)
{
- size_t psize;
-
- psize = PAGE_CEILING(size);
- if (psize == oldsize) {
- /* Same size class. */
- if (size < oldsize) {
- memset((void *)((uintptr_t)ptr + size), kAllocPoison, oldsize -
- size);
- }
- return (false);
- } else {
- arena_chunk_t *chunk;
- arena_t *arena;
-
- chunk = GetChunkForPtr(ptr);
- arena = chunk->arena;
- MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
-
- if (psize < oldsize) {
- /* Fill before shrinking in order avoid a race. */
- memset((void *)((uintptr_t)ptr + size), kAllocPoison,
- oldsize - size);
- arena->RallocShrinkLarge(chunk, ptr, psize, oldsize);
- return (false);
- } else {
- bool ret = arena->RallocGrowLarge(chunk, ptr, psize, oldsize);
- if (ret == false && opt_zero) {
- memset((void *)((uintptr_t)ptr + oldsize), 0,
- size - oldsize);
- }
- return (ret);
- }
- }
+  size_t psize = PAGE_CEILING(aSize);
+ if (psize == aOldSize) {
+ /* Same size class. */
+ if (aSize < aOldSize) {
+ memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
+ }
+ return false;
+ } else {
+ arena_chunk_t* chunk;
+ arena_t* arena;
+
+ chunk = GetChunkForPtr(aPtr);
+ arena = chunk->arena;
+ MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
+
+ if (psize < aOldSize) {
+      /* Fill before shrinking in order to avoid a race. */
+ memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
+ arena->RallocShrinkLarge(chunk, aPtr, psize, aOldSize);
+ return false;
+ } else {
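+      /* Growing in place only succeeds when the run immediately following
+       * this allocation is free and large enough (see the comment above
+       * this function). */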
+ bool ret = arena->RallocGrowLarge(chunk, aPtr, psize, aOldSize);
+      if (!ret && opt_zero) {
+ memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
+ }
+ return ret;
+ }
+ }
}
static void*
arena_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
{
void* ret;
size_t copysize;
@@ -4781,41 +4779,42 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
MOZ_ASSERT(aStats->mapped >= aStats->allocated + aStats->waste +
aStats->page_cache + aStats->bookkeeping);
}
#ifdef MALLOC_DOUBLE_PURGE
/* Explicitly remove all of this chunk's MADV_FREE'd pages from memory. */
static void
-hard_purge_chunk(arena_chunk_t *chunk)
+hard_purge_chunk(arena_chunk_t* aChunk)
{
- /* See similar logic in arena_t::Purge(). */
-
- size_t i;
- for (i = arena_chunk_header_npages; i < chunk_npages; i++) {
- /* Find all adjacent pages with CHUNK_MAP_MADVISED set. */
- size_t npages;
- for (npages = 0;
- chunk->map[i + npages].bits & CHUNK_MAP_MADVISED && i + npages < chunk_npages;
- npages++) {
- /* Turn off the chunk's MADV_FREED bit and turn on its
- * DECOMMITTED bit. */
- MOZ_DIAGNOSTIC_ASSERT(!(chunk->map[i + npages].bits & CHUNK_MAP_DECOMMITTED));
- chunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
- }
-
- /* We could use mincore to find out which pages are actually
- * present, but it's not clear that's better. */
- if (npages > 0) {
- pages_decommit(((char*)chunk) + (i << pagesize_2pow), npages << pagesize_2pow);
- pages_commit(((char*)chunk) + (i << pagesize_2pow), npages << pagesize_2pow);
- }
- i += npages;
- }
+ /* See similar logic in arena_t::Purge(). */
+ for (size_t i = arena_chunk_header_npages; i < chunk_npages; i++) {
+ /* Find all adjacent pages with CHUNK_MAP_MADVISED set. */
+ size_t npages;
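+    /* Test the bound before reading the map entry so we never index past
+     * the end of aChunk->map. */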
+    for (npages = 0; i + npages < chunk_npages &&
+                     (aChunk->map[i + npages].bits & CHUNK_MAP_MADVISED);
+         npages++) {
+      /* Turn off the page's CHUNK_MAP_MADVISED bit and turn on its
+       * CHUNK_MAP_DECOMMITTED bit. */
+ MOZ_DIAGNOSTIC_ASSERT(
+ !(aChunk->map[i + npages].bits & CHUNK_MAP_DECOMMITTED));
+ aChunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
+ }
+
+ /* We could use mincore to find out which pages are actually
+ * present, but it's not clear that's better. */
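+    /* Decommitting and then recommitting the range forces the kernel to
+     * drop the MADV_FREE'd pages and back it with fresh, zero-filled
+     * pages. */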
+ if (npages > 0) {
+ pages_decommit(((char*)aChunk) + (i << pagesize_2pow),
+ npages << pagesize_2pow);
+ pages_commit(((char*)aChunk) + (i << pagesize_2pow),
+ npages << pagesize_2pow);
+ }
+ i += npages;
+ }
}
/* Explicitly remove all of this arena's MADV_FREE'd pages from memory. */
void
arena_t::HardPurge()
{
MutexAutoLock lock(mLock);