--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -784,16 +784,18 @@ public:
arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);
void DallocRun(arena_run_t* aRun, bool aDirty);
void SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero);
void TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize);
+ void TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize, bool aDirty);
+
void Purge(bool aAll);
void HardPurge();
};
/******************************************************************************/
/*
* Data.
@@ -3022,35 +3024,35 @@ arena_t::TrimRunHead(arena_chunk_t* aChu
aChunk->map[pageind].bits = (aOldSize - aNewSize) | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
aChunk->map[pageind+head_npages].bits = aNewSize | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
DallocRun(aRun, false);
}
-static void
-arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t oldsize, size_t newsize, bool dirty)
+void
+arena_t::TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize,
+ size_t aNewSize, bool aDirty)
{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
- size_t npages = newsize >> pagesize_2pow;
-
- MOZ_ASSERT(oldsize > newsize);
-
- /*
- * Update the chunk map so that arena_t::DallocRun() can treat the
- * trailing run as separately allocated.
- */
- chunk->map[pageind].bits = newsize | CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED;
- chunk->map[pageind+npages].bits = (oldsize - newsize) | CHUNK_MAP_LARGE
- | CHUNK_MAP_ALLOCATED;
-
- arena->DallocRun((arena_run_t*)(uintptr_t(run) + newsize), dirty);
+ size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> pagesize_2pow;
+ size_t npages = aNewSize >> pagesize_2pow;
+
+ MOZ_ASSERT(aOldSize > aNewSize);
+
+ /*
+ * Update the chunk map so that arena_t::DallocRun() can treat the
+ * trailing run as separately allocated.
+ */
+ aChunk->map[pageind].bits = aNewSize | CHUNK_MAP_LARGE |
+ CHUNK_MAP_ALLOCATED;
+ aChunk->map[pageind+npages].bits = (aOldSize - aNewSize) | CHUNK_MAP_LARGE
+ | CHUNK_MAP_ALLOCATED;
+
+ DallocRun((arena_run_t*)(uintptr_t(aRun) + aNewSize), aDirty);
}
static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
arena_chunk_map_t *mapelm;
arena_run_t *run;
unsigned i, remainder;
@@ -3352,32 +3354,32 @@ arena_palloc(arena_t *arena, size_t alig
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
offset = (uintptr_t)ret & (alignment - 1);
MOZ_ASSERT((offset & pagesize_mask) == 0);
MOZ_ASSERT(offset < alloc_size);
if (offset == 0)
- arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, alloc_size, size, false);
+ arena->TrimRunTail(chunk, (arena_run_t*)ret, alloc_size, size, false);
else {
size_t leadsize, trailsize;
leadsize = alignment - offset;
if (leadsize > 0) {
arena->TrimRunHead(chunk, (arena_run_t*)ret, alloc_size,
alloc_size - leadsize);
ret = (void *)((uintptr_t)ret + leadsize);
}
trailsize = alloc_size - leadsize - size;
if (trailsize != 0) {
/* Trim trailing space. */
MOZ_ASSERT(trailsize < alloc_size);
- arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, size + trailsize,
+ arena->TrimRunTail(chunk, (arena_run_t*)ret, size + trailsize,
size, false);
}
}
arena->mStats.allocated_large += size;
malloc_spin_unlock(&arena->mLock);
if (opt_junk)
@@ -3862,18 +3864,17 @@ arena_ralloc_large_shrink(arena_t *arena
MOZ_ASSERT(size < oldsize);
/*
* Shrink the run, and make trailing pages available for other
* allocations.
*/
malloc_spin_lock(&arena->mLock);
- arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
- true);
+ arena->TrimRunTail(chunk, (arena_run_t *)ptr, oldsize, size, true);
arena->mStats.allocated_large -= oldsize - size;
malloc_spin_unlock(&arena->mLock);
}
static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t size, size_t oldsize)
{