Bug 1402283 - Replace isalloc/isalloc_validate with static methods of a new AllocInfo class. r?njn
Both functions do essentially the same thing, one having more validation
than the other. We can use a template with a boolean parameter to avoid
the duplication.
Furthermore, we're soon going to require, in some cases, more
information than just the size of the allocation, so we wrap the
result in a helper class that describes an active allocation.
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -3258,81 +3258,72 @@ arena_salloc(const void* ptr)
} else {
ret = mapbits & ~gPageSizeMask;
MOZ_DIAGNOSTIC_ASSERT(ret != 0);
}
return ret;
}
-// Validate ptr before assuming that it points to an allocation. Currently,
-// the following validation is performed:
-//
-// + Check that ptr is not nullptr.
-//
-// + Check that ptr lies within a mapped chunk.
-static inline size_t
-isalloc_validate(const void* aPtr)
+class AllocInfo
{
- // If the allocator is not initialized, the pointer can't belong to it.
- if (malloc_initialized == false) {
- return 0;
- }
-
- auto chunk = GetChunkForPtr(aPtr);
- if (!chunk) {
- return 0;
- }
-
- if (!gChunkRTree.Get(chunk)) {
- return 0;
- }
-
- if (chunk != aPtr) {
- MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
- return arena_salloc(aPtr);
- }
-
- extent_node_t key;
-
- // Chunk.
- key.mAddr = (void*)chunk;
- MutexAutoLock lock(huge_mtx);
- extent_node_t* node = huge.Search(&key);
- if (node) {
- return node->mSize;
- }
- return 0;
-}
-
-static inline size_t
-isalloc(const void* aPtr)
-{
- MOZ_ASSERT(aPtr);
-
- auto chunk = GetChunkForPtr(aPtr);
- if (chunk != aPtr) {
- // Region.
- MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
-
- return arena_salloc(aPtr);
- }
-
- extent_node_t key;
-
- // Chunk (huge allocation).
- MutexAutoLock lock(huge_mtx);
-
- // Extract from tree of huge allocations.
- key.mAddr = const_cast<void*>(aPtr);
- extent_node_t* node = huge.Search(&key);
- MOZ_DIAGNOSTIC_ASSERT(node);
-
- return node->mSize;
-}
+public:
+ template<bool Validate = false>
+ static inline AllocInfo Get(const void* aPtr)
+ {
+ // If the allocator is not initialized, the pointer can't belong to it.
+ if (Validate && malloc_initialized == false) {
+ return AllocInfo(0);
+ }
+
+ auto chunk = GetChunkForPtr(aPtr);
+ if (Validate) {
+ if (!chunk || !gChunkRTree.Get(chunk)) {
+ return AllocInfo(0);
+ }
+ }
+
+ if (chunk != aPtr) {
+ MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
+ return AllocInfo(arena_salloc(aPtr));
+ }
+
+ extent_node_t key;
+
+ // Huge allocation
+ key.mAddr = chunk;
+ MutexAutoLock lock(huge_mtx);
+ extent_node_t* node = huge.Search(&key);
+ if (Validate && !node) {
+ return AllocInfo(0);
+ }
+ return AllocInfo(node->mSize);
+ }
+
+ // Validate ptr before assuming that it points to an allocation. Currently,
+ // the following validation is performed:
+ //
+ // + Check that ptr is not nullptr.
+ //
+ // + Check that ptr lies within a mapped chunk.
+ static inline AllocInfo GetValidated(const void* aPtr)
+ {
+ return Get<true>(aPtr);
+ }
+
+ explicit AllocInfo(size_t aSize)
+ : mSize(aSize)
+ {
+ }
+
+ size_t Size() { return mSize; }
+
+private:
+ size_t mSize;
+};
template<>
inline void
MozJemalloc::jemalloc_ptr_info(const void* aPtr, jemalloc_ptr_info_t* aInfo)
{
arena_chunk_t* chunk = GetChunkForPtr(aPtr);
// Is the pointer null, or within one chunk's size of null?
@@ -3745,17 +3736,17 @@ arena_ralloc(void* aPtr, size_t aSize, s
static inline void*
iralloc(void* aPtr, size_t aSize, arena_t* aArena)
{
size_t oldsize;
MOZ_ASSERT(aPtr);
MOZ_ASSERT(aSize != 0);
- oldsize = isalloc(aPtr);
+ oldsize = AllocInfo::Get(aPtr).Size();
return (aSize <= gMaxLargeClass) ? arena_ralloc(aPtr, aSize, oldsize, aArena)
: huge_ralloc(aPtr, aSize, oldsize);
}
arena_t::arena_t()
{
unsigned i;
@@ -4445,17 +4436,17 @@ MozJemalloc::malloc_good_size(size_t aSi
}
return aSize;
}
template<>
inline size_t
MozJemalloc::malloc_usable_size(usable_ptr_t aPtr)
{
- return isalloc_validate(aPtr);
+ return AllocInfo::GetValidated(aPtr).Size();
}
template<>
inline void
MozJemalloc::jemalloc_stats(jemalloc_stats_t* aStats)
{
size_t non_arena_mapped, chunk_header_size;
@@ -5011,17 +5002,17 @@ MOZ_EXPORT void* (*__memalign_hook)(size
// visibility be used for interposition where available?
#error "Interposing malloc is unsafe on this system without libc malloc hooks."
#endif
#ifdef XP_WIN
void*
_recalloc(void* aPtr, size_t aCount, size_t aSize)
{
- size_t oldsize = aPtr ? isalloc(aPtr) : 0;
+ size_t oldsize = aPtr ? AllocInfo::Get(aPtr).Size() : 0;
CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aCount) * aSize;
if (!checkedSize.isValid()) {
return nullptr;
}
size_t newsize = checkedSize.value();
@@ -5038,17 +5029,17 @@ void*
return aPtr;
}
// This impl of _expand doesn't ever actually expand or shrink blocks: it
// simply replies that you may continue using a shrunk block.
void*
_expand(void* aPtr, size_t newsize)
{
- if (isalloc(aPtr) >= newsize) {
+ if (AllocInfo::Get(aPtr).Size() >= newsize) {
return aPtr;
}
return nullptr;
}
size_t
_msize(void* aPtr)