--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -688,29 +688,33 @@ struct ExtentTreeBoundsTrait : public Ex
class SizeClass
{
public:
enum ClassType
{
Tiny,
Quantum,
SubPage,
+ Large,
};
explicit inline SizeClass(size_t aSize)
{
if (aSize <= kMaxTinyClass) {
mType = Tiny;
mSize = std::max(RoundUpPow2(aSize), kMinTinyClass);
} else if (aSize <= kMaxQuantumClass) {
mType = Quantum;
mSize = QUANTUM_CEILING(aSize);
} else if (aSize <= gMaxSubPageClass) {
mType = SubPage;
mSize = RoundUpPow2(aSize);
+ } else if (aSize <= gMaxLargeClass) {
+ mType = Large;
+ mSize = PAGE_CEILING(aSize);
} else {
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid size");
}
}
SizeClass& operator=(const SizeClass& aOther) = default;
bool operator==(const SizeClass& aOther) { return aOther.mSize == mSize; }
@@ -3656,67 +3660,44 @@ arena_t::RallocGrowLarge(arena_chunk_t*
mStats.allocated_large += aSize - aOldSize;
return true;
}
return false;
}
-// Try to resize a large allocation, in order to avoid copying. This will
-// always fail if growing an object, and the following run is already in use.
-// Returns whether reallocation was successful.
-static bool
-arena_ralloc_large(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
-{
- size_t psize;
-
- psize = PAGE_CEILING(aSize);
- if (psize == aOldSize) {
- // Same size class.
- if (aSize < aOldSize) {
- memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
- }
- return true;
- }
-
- arena_chunk_t* chunk = GetChunkForPtr(aPtr);
-
- if (psize < aOldSize) {
- // Fill before shrinking in order avoid a race.
- memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
- aArena->RallocShrinkLarge(chunk, aPtr, psize, aOldSize);
- return true;
- }
-
- bool ret = aArena->RallocGrowLarge(chunk, aPtr, psize, aOldSize);
- if (ret && opt_zero) {
- memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
- }
- return ret;
-}
-
static void*
arena_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
{
void* ret;
size_t copysize;
+ SizeClass sizeClass(aSize);
// Try to avoid moving the allocation.
- if (aSize <= gMaxBinClass) {
- if (aOldSize <= gMaxBinClass && SizeClass(aSize).Size() == aOldSize) {
- if (aSize < aOldSize) {
- memset(
- (void*)(uintptr_t(aPtr) + aSize), kAllocPoison, aOldSize - aSize);
- }
+ if (aOldSize <= gMaxLargeClass && sizeClass.Size() == aOldSize) {
+ if (aSize < aOldSize) {
+ memset(
+ (void*)(uintptr_t(aPtr) + aSize), kAllocPoison, aOldSize - aSize);
+ }
+ return aPtr;
+ }
+ if (sizeClass.Type() == SizeClass::Large && aOldSize > gMaxBinClass &&
+ aOldSize <= gMaxLargeClass) {
+ arena_chunk_t* chunk = GetChunkForPtr(aPtr);
+ if (sizeClass.Size() < aOldSize) {
+ // Fill before shrinking in order to avoid a race.
+ memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
+ aArena->RallocShrinkLarge(chunk, aPtr, sizeClass.Size(), aOldSize);
return aPtr;
}
- } else if (aOldSize > gMaxBinClass && aOldSize <= gMaxLargeClass) {
- MOZ_ASSERT(aSize > gMaxBinClass);
- if (arena_ralloc_large(aPtr, aSize, aOldSize, aArena)) {
+ if (aArena->RallocGrowLarge(chunk, aPtr, sizeClass.Size(), aOldSize)) {
+ if (opt_zero) {
+ memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
+ }
return aPtr;
}
}
// If we get here, then aSize and aOldSize are different enough that we
// need to move the object. In that case, fall back to allocating new
// space and copying.
ret = aArena->Malloc(aSize, false);
@@ -4425,22 +4406,19 @@ MozJemalloc::valloc(size_t aSize)
// ***************************************************************************
// Begin non-standard functions.
// This was added by Mozilla for use by SQLite.
template<>
inline size_t
MozJemalloc::malloc_good_size(size_t aSize)
{
- if (aSize <= gMaxSubPageClass) {
- // Small
+ if (aSize <= gMaxLargeClass) {
+ // Small or large: round up to the nearest size class.
aSize = SizeClass(aSize).Size();
- } else if (aSize <= gMaxLargeClass) {
- // Large.
- aSize = PAGE_CEILING(aSize);
} else {
// Huge. We use PAGE_CEILING to get psize, instead of using
// CHUNK_CEILING to get csize. This ensures that this
// malloc_usable_size(malloc(n)) always matches
// malloc_good_size(n).
aSize = PAGE_CEILING(aSize);
}
return aSize;