--- a/memory/build/fallback.cpp
+++ b/memory/build/fallback.cpp
@@ -6,43 +6,43 @@
#include "mozmemory.h"
#include "mozjemalloc.h"
#include <stdlib.h>
#ifndef HAVE_MEMALIGN
namespace {
-inline void* memalign(size_t aAlignment, size_t aSize)
+inline void*
+memalign(size_t aAlignment, size_t aSize)
{
#ifdef XP_WIN
return _aligned_malloc(aSize, aAlignment);
#else
void* ret;
if (posix_memalign(&ret, aAlignment, aSize) != 0) {
return nullptr;
}
return ret;
#endif
}
-
}
#endif
-struct SystemMalloc {
-#define MALLOC_DECL(name, return_type, ...) \
- static inline return_type \
- name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
- { \
- return ::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
+struct SystemMalloc
+{
+#define MALLOC_DECL(name, return_type, ...) \
+ static inline return_type name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
+ { \
+ return ::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
};
-#define MALLOC_DECL(name, return_type, ...) \
- MOZ_JEMALLOC_API return_type \
- name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
- { \
- return DummyArenaAllocator<SystemMalloc>::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
+#define MALLOC_DECL(name, return_type, ...) \
+ MOZ_JEMALLOC_API return_type name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
+ { \
+ return DummyArenaAllocator<SystemMalloc>::name( \
+ ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#define MALLOC_FUNCS MALLOC_FUNCS_ARENA
#include "malloc_decls.h"
--- a/memory/build/malloc_decls.h
+++ b/memory/build/malloc_decls.h
@@ -6,65 +6,64 @@
// Helper header to declare all the supported malloc functions.
// MALLOC_DECL arguments are:
// - function name
// - return type
// - argument types
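//
// As an illustration (a sketch, not part of the actual protocol), a consumer
// that only wants prototypes for the base allocation functions could do:
//
//   #define MALLOC_DECL(name, return_type, ...) return_type name(__VA_ARGS__);
//   #define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
//   #include "malloc_decls.h"
//
// which expands to declarations such as:
//   void* malloc(size_t);
//   void free(void*);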
#ifndef malloc_decls_h
-# define malloc_decls_h
+#define malloc_decls_h
-# include "mozjemalloc_types.h"
+#include "mozjemalloc_types.h"
-# define MALLOC_FUNCS_MALLOC_BASE 1
-# define MALLOC_FUNCS_MALLOC_EXTRA 2
-# define MALLOC_FUNCS_MALLOC (MALLOC_FUNCS_MALLOC_BASE | \
- MALLOC_FUNCS_MALLOC_EXTRA)
-# define MALLOC_FUNCS_JEMALLOC 4
-# define MALLOC_FUNCS_INIT 8
-# define MALLOC_FUNCS_BRIDGE 16
-# define MALLOC_FUNCS_ARENA_BASE 32
-# define MALLOC_FUNCS_ARENA_ALLOC 64
-# define MALLOC_FUNCS_ARENA (MALLOC_FUNCS_ARENA_BASE | \
- MALLOC_FUNCS_ARENA_ALLOC)
-# define MALLOC_FUNCS_ALL (MALLOC_FUNCS_INIT | MALLOC_FUNCS_BRIDGE | \
- MALLOC_FUNCS_MALLOC | MALLOC_FUNCS_JEMALLOC | \
- MALLOC_FUNCS_ARENA)
+#define MALLOC_FUNCS_MALLOC_BASE 1
+#define MALLOC_FUNCS_MALLOC_EXTRA 2
+#define MALLOC_FUNCS_MALLOC \
+ (MALLOC_FUNCS_MALLOC_BASE | MALLOC_FUNCS_MALLOC_EXTRA)
+#define MALLOC_FUNCS_JEMALLOC 4
+#define MALLOC_FUNCS_INIT 8
+#define MALLOC_FUNCS_BRIDGE 16
+#define MALLOC_FUNCS_ARENA_BASE 32
+#define MALLOC_FUNCS_ARENA_ALLOC 64
+#define MALLOC_FUNCS_ARENA (MALLOC_FUNCS_ARENA_BASE | MALLOC_FUNCS_ARENA_ALLOC)
+#define MALLOC_FUNCS_ALL \
+ (MALLOC_FUNCS_INIT | MALLOC_FUNCS_BRIDGE | MALLOC_FUNCS_MALLOC | \
+ MALLOC_FUNCS_JEMALLOC | MALLOC_FUNCS_ARENA)
#endif // malloc_decls_h
#ifndef MALLOC_FUNCS
-# define MALLOC_FUNCS (MALLOC_FUNCS_MALLOC | MALLOC_FUNCS_JEMALLOC | \
- MALLOC_FUNCS_ARENA)
+#define MALLOC_FUNCS \
+ (MALLOC_FUNCS_MALLOC | MALLOC_FUNCS_JEMALLOC | MALLOC_FUNCS_ARENA)
#endif
#ifdef MALLOC_DECL
-# if MALLOC_FUNCS & MALLOC_FUNCS_INIT
-MALLOC_DECL(init, void, const malloc_table_t *)
-# endif
-# if MALLOC_FUNCS & MALLOC_FUNCS_BRIDGE
+#if MALLOC_FUNCS & MALLOC_FUNCS_INIT
+MALLOC_DECL(init, void, const malloc_table_t*)
+#endif
+#if MALLOC_FUNCS & MALLOC_FUNCS_BRIDGE
MALLOC_DECL(get_bridge, struct ReplaceMallocBridge*)
-# endif
-# if MALLOC_FUNCS & MALLOC_FUNCS_MALLOC_BASE
-MALLOC_DECL(malloc, void *, size_t)
-MALLOC_DECL(calloc, void *, size_t, size_t)
-MALLOC_DECL(realloc, void *, void *, size_t)
-MALLOC_DECL(free, void, void *)
-MALLOC_DECL(memalign, void *, size_t, size_t)
-# endif
-# if MALLOC_FUNCS & MALLOC_FUNCS_MALLOC_EXTRA
-MALLOC_DECL(posix_memalign, int, void **, size_t, size_t)
-MALLOC_DECL(aligned_alloc, void *, size_t, size_t)
-MALLOC_DECL(valloc, void *, size_t)
+#endif
+#if MALLOC_FUNCS & MALLOC_FUNCS_MALLOC_BASE
+MALLOC_DECL(malloc, void*, size_t)
+MALLOC_DECL(calloc, void*, size_t, size_t)
+MALLOC_DECL(realloc, void*, void*, size_t)
+MALLOC_DECL(free, void, void*)
+MALLOC_DECL(memalign, void*, size_t, size_t)
+#endif
+#if MALLOC_FUNCS & MALLOC_FUNCS_MALLOC_EXTRA
+MALLOC_DECL(posix_memalign, int, void**, size_t, size_t)
+MALLOC_DECL(aligned_alloc, void*, size_t, size_t)
+MALLOC_DECL(valloc, void*, size_t)
MALLOC_DECL(malloc_usable_size, size_t, usable_ptr_t)
MALLOC_DECL(malloc_good_size, size_t, size_t)
-# endif
-# if MALLOC_FUNCS & MALLOC_FUNCS_JEMALLOC
-MALLOC_DECL(jemalloc_stats, void, jemalloc_stats_t *)
+#endif
+#if MALLOC_FUNCS & MALLOC_FUNCS_JEMALLOC
+MALLOC_DECL(jemalloc_stats, void, jemalloc_stats_t*)
// On some operating systems (Mac), we use madvise(MADV_FREE) to hand pages
// back to the operating system. On Mac, the operating system doesn't take
// this memory back immediately; instead, the OS takes it back only when the
// machine is running out of physical memory.
//
// This is great from the standpoint of efficiency, but it makes measuring our
// actual RSS difficult, because pages which we've MADV_FREE'd shouldn't count
@@ -93,40 +92,40 @@ MALLOC_DECL(jemalloc_purge_freed_pages,
MALLOC_DECL(jemalloc_free_dirty_pages, void)
// Opt in or out of a thread local arena (the bool argument indicates whether
// to opt in (true) or out (false)).
MALLOC_DECL(jemalloc_thread_local_arena, void, bool)
// Provide information about any allocation enclosing the given address.
MALLOC_DECL(jemalloc_ptr_info, void, const void*, jemalloc_ptr_info_t*)
-# endif
+#endif
-# if MALLOC_FUNCS & MALLOC_FUNCS_ARENA_BASE
+#if MALLOC_FUNCS & MALLOC_FUNCS_ARENA_BASE
// Creates a separate arena, and returns its id, valid to use with moz_arena_*
// functions.
MALLOC_DECL(moz_create_arena, arena_id_t)
// Dispose of the given arena. Subsequent uses of the arena will fail.
MALLOC_DECL(moz_dispose_arena, void, arena_id_t)
-# endif
+#endif
-# if MALLOC_FUNCS & MALLOC_FUNCS_ARENA_ALLOC
+#if MALLOC_FUNCS & MALLOC_FUNCS_ARENA_ALLOC
// Same as the functions without the moz_arena_ prefix, but using arenas
// created with moz_create_arena.
// The contract, even if not enforced at runtime in some configurations,
// is that moz_arena_realloc and moz_arena_free will crash if the wrong
// arena id is given. All functions will crash if the arena id is invalid.
// Although discouraged, plain realloc and free can still be used on
// pointers allocated with these functions. Realloc will properly keep
// new pointers in the same arena as the original.
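//
// A minimal usage sketch (assuming the MALLOC_DECL consumer exposes these
// functions under their plain names, as mozmemory.h does):
//   arena_id_t arena = moz_create_arena();
//   void* p = moz_arena_malloc(arena, 42);
//   p = moz_arena_realloc(arena, p, 64);
//   moz_arena_free(arena, p);
//   moz_dispose_arena(arena);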
MALLOC_DECL(moz_arena_malloc, void*, arena_id_t, size_t)
MALLOC_DECL(moz_arena_calloc, void*, arena_id_t, size_t, size_t)
MALLOC_DECL(moz_arena_realloc, void*, arena_id_t, void*, size_t)
MALLOC_DECL(moz_arena_free, void, arena_id_t, void*)
MALLOC_DECL(moz_arena_memalign, void*, arena_id_t, size_t, size_t)
-# endif
+#endif
#endif // MALLOC_DECL
#undef MALLOC_DECL
#undef MALLOC_FUNCS
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -195,17 +195,16 @@ getenv(const char* name)
#endif
#ifndef XP_WIN
#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif
#endif
-
// Some tools, such as /dev/dsp wrappers, are LD_PRELOAD libraries that
// happen to override mmap() and call dlsym() from their overridden
// mmap(). The problem is that dlsym() calls malloc(), and this ends
// up in a deadlock in jemalloc.
// On these systems, we prefer to directly use the system call.
// We do that for Linux systems and kfreebsd with GNU userland.
// Note sanity checks are not done (alignment of offset, ...) because
// the uses of mmap are pretty limited, in jemalloc.
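// A minimal sketch of what "directly use the system call" means, assuming
// Linux with <unistd.h> and <sys/syscall.h> available (the actual definition
// in this file differs in its details):
//   static inline void*
//   _mmap(void* addr, size_t length, int prot, int flags, int fd, off_t offset)
//   {
//     return (void*)syscall(SYS_mmap, addr, length, prot, flags, fd, offset);
//   }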
@@ -1016,17 +1015,18 @@ struct ArenaTreeTrait
// used by the standard API.
class ArenaCollection
{
public:
bool Init()
{
mArenas.Init();
mPrivateArenas.Init();
- mDefaultArena = mLock.Init() ? CreateArena(/* IsPrivate = */ false) : nullptr;
+ mDefaultArena =
+ mLock.Init() ? CreateArena(/* IsPrivate = */ false) : nullptr;
if (mDefaultArena) {
// arena_t constructor sets this to a lower value for thread local
// arenas; Reset to the default value for the main arena.
mDefaultArena->mMaxDirty = opt_dirty_max;
}
return bool(mDefaultArena);
}
@@ -1053,27 +1053,24 @@ public:
{
}
Item<Iterator> begin()
{
return Item<Iterator>(this, *Tree::Iterator::begin());
}
- Item<Iterator> end()
- {
- return Item<Iterator>(this, nullptr);
- }
+ Item<Iterator> end() { return Item<Iterator>(this, nullptr); }
Tree::TreeNode* Next()
{
Tree::TreeNode* result = Tree::Iterator::Next();
if (!result && mNextTree) {
- new (this) Iterator(mNextTree, nullptr);
- result = reinterpret_cast<Tree::TreeNode*>(*Tree::Iterator::begin());
+ new (this) Iterator(mNextTree, nullptr);
+ result = reinterpret_cast<Tree::TreeNode*>(*Tree::Iterator::begin());
}
return result;
}
private:
Tree* mNextTree;
};
@@ -4757,17 +4754,18 @@ MozJemalloc::moz_dispose_arena(arena_id_
}
}
#define MALLOC_DECL(name, return_type, ...) \
template<> \
inline return_type MozJemalloc::moz_arena_##name( \
arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
{ \
- BaseAllocator allocator(gArenas.GetById(aArenaId, /* IsPrivate = */ true));\
+ BaseAllocator allocator( \
+ gArenas.GetById(aArenaId, /* IsPrivate = */ true)); \
return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
#else
#define MALLOC_DECL(name, return_type, ...) \
--- a/memory/build/mozjemalloc.h
+++ b/memory/build/mozjemalloc.h
@@ -11,68 +11,74 @@
#include "mozilla/MacroArgs.h"
// Macro helpers
#define MACRO_CALL(a, b) a b
// Can't use macros recursively, so we need another one doing the same as above.
#define MACRO_CALL2(a, b) a b
-#define ARGS_HELPER(name, ...) MACRO_CALL2( \
- MOZ_PASTE_PREFIX_AND_ARG_COUNT(name, ##__VA_ARGS__), \
- (__VA_ARGS__))
+#define ARGS_HELPER(name, ...) \
+ MACRO_CALL2(MOZ_PASTE_PREFIX_AND_ARG_COUNT(name, ##__VA_ARGS__), \
+ (__VA_ARGS__))
#define TYPED_ARGS0()
#define TYPED_ARGS1(t1) t1 arg1
#define TYPED_ARGS2(t1, t2) TYPED_ARGS1(t1), t2 arg2
#define TYPED_ARGS3(t1, t2, t3) TYPED_ARGS2(t1, t2), t3 arg3
#define ARGS0()
#define ARGS1(t1) arg1
#define ARGS2(t1, t2) ARGS1(t1), arg2
#define ARGS3(t1, t2, t3) ARGS2(t1, t2), arg3
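// For example, with two argument types, the helpers expand as follows:
//   ARGS_HELPER(TYPED_ARGS, void*, size_t) -> TYPED_ARGS2(void*, size_t)
//                                          -> void* arg1, size_t arg2
//   ARGS_HELPER(ARGS, void*, size_t)       -> ARGS2(void*, size_t)
//                                          -> arg1, arg2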
#ifdef MOZ_MEMORY
// Generic interface exposing the whole public allocator API
// This facilitates the implementation of things like replace-malloc.
// Note: compilers are expected to be able to optimize out `this`.
-template <typename T>
-struct Allocator: public T {
-#define MALLOC_DECL(name, return_type, ...) \
+template<typename T>
+struct Allocator : public T
+{
+#define MALLOC_DECL(name, return_type, ...) \
static return_type name(__VA_ARGS__);
#include "malloc_decls.h"
};
// The MozJemalloc allocator
-struct MozJemallocBase {};
+struct MozJemallocBase
+{
+};
typedef Allocator<MozJemallocBase> MozJemalloc;
#ifdef MOZ_REPLACE_MALLOC
// The replace-malloc allocator
-struct ReplaceMallocBase {};
+struct ReplaceMallocBase
+{
+};
typedef Allocator<ReplaceMallocBase> ReplaceMalloc;
typedef ReplaceMalloc DefaultMalloc;
#else
typedef MozJemalloc DefaultMalloc;
#endif
#endif // MOZ_MEMORY
// Dummy implementation of the moz_arena_* API, falling back to a given
// implementation of the base allocator.
-template <typename T>
-struct DummyArenaAllocator {
+template<typename T>
+struct DummyArenaAllocator
+{
static arena_id_t moz_create_arena(void) { return 0; }
- static void moz_dispose_arena(arena_id_t) { }
+ static void moz_dispose_arena(arena_id_t) {}
-#define MALLOC_DECL(name, return_type, ...) \
- static return_type \
- moz_arena_ ## name(arena_id_t, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
- { \
- return T::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
+#define MALLOC_DECL(name, return_type, ...) \
+ static return_type moz_arena_##name(arena_id_t, \
+ ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
+ { \
+ return T::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
}
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
};
#endif
--- a/memory/build/mozjemalloc_types.h
+++ b/memory/build/mozjemalloc_types.h
@@ -54,42 +54,44 @@ extern "C" {
typedef MALLOC_USABLE_SIZE_CONST_PTR void* usable_ptr_t;
typedef size_t arena_id_t;
// jemalloc_stats() is not a stable interface. When using jemalloc_stats_t, be
// sure that the compiled results of jemalloc.c are in sync with this header
// file.
-typedef struct {
- // Run-time configuration settings.
- bool opt_junk; // Fill allocated memory with kAllocJunk?
- bool opt_zero; // Fill allocated memory with 0x0?
- size_t narenas; // Number of arenas.
- size_t quantum; // Allocation quantum.
- size_t small_max; // Max quantum-spaced allocation size.
- size_t large_max; // Max sub-chunksize allocation size.
- size_t chunksize; // Size of each virtual memory mapping.
- size_t page_size; // Size of pages.
- size_t dirty_max; // Max dirty pages per arena.
+typedef struct
+{
+ // Run-time configuration settings.
+ bool opt_junk; // Fill allocated memory with kAllocJunk?
+ bool opt_zero; // Fill allocated memory with 0x0?
+ size_t narenas; // Number of arenas.
+ size_t quantum; // Allocation quantum.
+ size_t small_max; // Max quantum-spaced allocation size.
+ size_t large_max; // Max sub-chunksize allocation size.
+ size_t chunksize; // Size of each virtual memory mapping.
+ size_t page_size; // Size of pages.
+ size_t dirty_max; // Max dirty pages per arena.
- // Current memory usage statistics.
- size_t mapped; // Bytes mapped (not necessarily committed).
- size_t allocated; // Bytes allocated (committed, in use by application).
- size_t waste; // Bytes committed, not in use by the
- // application, and not intentionally left
- // unused (i.e., not dirty).
- size_t page_cache; // Committed, unused pages kept around as a
- // cache. (jemalloc calls these "dirty".)
- size_t bookkeeping; // Committed bytes used internally by the
- // allocator.
- size_t bin_unused; // Bytes committed to a bin but currently unused.
+ // Current memory usage statistics.
+ size_t mapped; // Bytes mapped (not necessarily committed).
+ size_t allocated; // Bytes allocated (committed, in use by application).
+ size_t waste; // Bytes committed, not in use by the
+ // application, and not intentionally left
+ // unused (i.e., not dirty).
+ size_t page_cache; // Committed, unused pages kept around as a
+ // cache. (jemalloc calls these "dirty".)
+ size_t bookkeeping; // Committed bytes used internally by the
+ // allocator.
+ size_t bin_unused; // Bytes committed to a bin but currently unused.
} jemalloc_stats_t;
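// For example (a sketch of typical use):
//   jemalloc_stats_t stats;
//   jemalloc_stats(&stats);
//   size_t overhead = stats.waste + stats.page_cache + stats.bookkeeping;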
-enum PtrInfoTag {
+enum PtrInfoTag
+{
// The pointer is not currently known to the allocator.
// 'addr' and 'size' are always 0.
TagUnknown,
// The pointer is within a live allocation.
// 'addr' and 'size' describe the allocation.
TagLiveSmall,
TagLiveLarge,
@@ -108,46 +110,43 @@ enum PtrInfoTag {
TagFreedPageZeroed,
};
// The information in jemalloc_ptr_info_t could be represented in a variety of
// ways. The chosen representation has the following properties.
// - The number of fields is minimized.
// - The 'tag' field unambiguously defines the meaning of the subsequent fields.
// Helper functions are used to group together related categories of tags.
-typedef struct {
+typedef struct
+{
enum PtrInfoTag tag;
- void* addr; // meaning depends on tag; see above
- size_t size; // meaning depends on tag; see above
+ void* addr; // meaning depends on tag; see above
+ size_t size; // meaning depends on tag; see above
} jemalloc_ptr_info_t;
static inline bool
jemalloc_ptr_is_live(jemalloc_ptr_info_t* info)
{
- return info->tag == TagLiveSmall ||
- info->tag == TagLiveLarge ||
+ return info->tag == TagLiveSmall || info->tag == TagLiveLarge ||
info->tag == TagLiveHuge;
}
static inline bool
jemalloc_ptr_is_freed(jemalloc_ptr_info_t* info)
{
- return info->tag == TagFreedSmall ||
- info->tag == TagFreedPageDirty ||
+ return info->tag == TagFreedSmall || info->tag == TagFreedPageDirty ||
info->tag == TagFreedPageDecommitted ||
- info->tag == TagFreedPageMadvised ||
- info->tag == TagFreedPageZeroed;
+ info->tag == TagFreedPageMadvised || info->tag == TagFreedPageZeroed;
}
static inline bool
jemalloc_ptr_is_freed_page(jemalloc_ptr_info_t* info)
{
return info->tag == TagFreedPageDirty ||
info->tag == TagFreedPageDecommitted ||
- info->tag == TagFreedPageMadvised ||
- info->tag == TagFreedPageZeroed;
+ info->tag == TagFreedPageMadvised || info->tag == TagFreedPageZeroed;
}
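// For example (a sketch), classifying an arbitrary address addr:
//   jemalloc_ptr_info_t info;
//   jemalloc_ptr_info(addr, &info);
//   if (jemalloc_ptr_is_live(&info)) {
//     // addr points into a live allocation of info.size bytes at info.addr.
//   }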
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _JEMALLOC_TYPES_H_
--- a/memory/build/mozmemory.h
+++ b/memory/build/mozmemory.h
@@ -23,37 +23,40 @@
#include "mozilla/Attributes.h"
#include "mozilla/Types.h"
#include "mozjemalloc_types.h"
#ifdef MOZ_MEMORY
// On OSX, malloc/malloc.h contains the declaration for malloc_good_size,
// which will call back into jemalloc through the zone allocator, so just use
// it.
#ifndef XP_DARWIN
-MOZ_MEMORY_API size_t malloc_good_size_impl(size_t size);
+MOZ_MEMORY_API size_t
+malloc_good_size_impl(size_t size);
// Note: the MOZ_GLUE_IN_PROGRAM ifdef below is there to avoid -Werror turning
// the protective if into errors. MOZ_GLUE_IN_PROGRAM is what triggers MFBT_API
// to use weak imports.
-static inline size_t _malloc_good_size(size_t size) {
-# if defined(MOZ_GLUE_IN_PROGRAM) && !defined(IMPL_MFBT)
+static inline size_t
+_malloc_good_size(size_t size)
+{
+#if defined(MOZ_GLUE_IN_PROGRAM) && !defined(IMPL_MFBT)
if (!malloc_good_size)
return size;
-# endif
+#endif
return malloc_good_size_impl(size);
}
-# define malloc_good_size _malloc_good_size
+#define malloc_good_size _malloc_good_size
#endif
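// For example (a sketch): requesting a size the allocator would round up to
// anyway lets a growing buffer use its slop:
//   size_t capacity = malloc_good_size(length);
//   char* buf = (char*)malloc(capacity);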
-#define MALLOC_DECL(name, return_type, ...) \
+#define MALLOC_DECL(name, return_type, ...) \
MOZ_JEMALLOC_API return_type name(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_JEMALLOC
#include "malloc_decls.h"
#endif
-#define MALLOC_DECL(name, return_type, ...) \
+#define MALLOC_DECL(name, return_type, ...) \
MOZ_JEMALLOC_API return_type name(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_ARENA
#include "malloc_decls.h"
#endif // mozmemory_h
--- a/memory/build/mozmemory_wrap.cpp
+++ b/memory/build/mozmemory_wrap.cpp
@@ -5,18 +5,18 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <string.h>
#include "mozmemory_wrap.h"
#include "mozilla/Types.h"
// Declare malloc implementation functions with the right return and
// argument types.
-#define MALLOC_DECL(name, return_type, ...) \
- MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
+#define MALLOC_DECL(name, return_type, ...) \
+ MOZ_MEMORY_API return_type name##_impl(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
#include "malloc_decls.h"
#ifdef MOZ_WRAP_NEW_DELETE
#include <new>
MFBT_API void*
operator new(size_t size)
@@ -67,42 +67,42 @@ operator delete[](void* ptr, std::nothro
}
#endif
// strndup and strdup may be defined as macros in string.h, which would
// clash with the definitions below.
#undef strndup
#undef strdup
-MOZ_MEMORY_API char *
-strndup_impl(const char *src, size_t len)
+MOZ_MEMORY_API char*
+strndup_impl(const char* src, size_t len)
{
- char* dst = (char*) malloc_impl(len + 1);
+ char* dst = (char*)malloc_impl(len + 1);
if (dst) {
strncpy(dst, src, len);
dst[len] = '\0';
}
return dst;
}
-MOZ_MEMORY_API char *
-strdup_impl(const char *src)
+MOZ_MEMORY_API char*
+strdup_impl(const char* src)
{
size_t len = strlen(src);
return strndup_impl(src, len);
}
#ifdef ANDROID
#include <stdarg.h>
#include <stdio.h>
MOZ_MEMORY_API int
-vasprintf_impl(char **str, const char *fmt, va_list ap)
+vasprintf_impl(char** str, const char* fmt, va_list ap)
{
- char* ptr, *_ptr;
+ char *ptr, *_ptr;
int ret;
if (str == NULL || fmt == NULL) {
return -1;
}
ptr = (char*)malloc_impl(128);
if (ptr == NULL) {
@@ -125,40 +125,40 @@ vasprintf_impl(char **str, const char *f
}
*str = _ptr;
return ret;
}
MOZ_MEMORY_API int
-asprintf_impl(char **str, const char *fmt, ...)
+asprintf_impl(char** str, const char* fmt, ...)
{
- int ret;
- va_list ap;
- va_start(ap, fmt);
+ int ret;
+ va_list ap;
+ va_start(ap, fmt);
- ret = vasprintf_impl(str, fmt, ap);
+ ret = vasprintf_impl(str, fmt, ap);
- va_end(ap);
+ va_end(ap);
- return ret;
+ return ret;
}
#endif
#ifdef XP_WIN
#include <wchar.h>
// We also need to provide our own impl of wcsdup so that we don't ask
// the CRT for memory from its heap (which will then be unfreeable).
MOZ_MEMORY_API wchar_t*
wcsdup_impl(const wchar_t* src)
{
size_t len = wcslen(src);
- wchar_t *dst = (wchar_t*) malloc_impl((len + 1) * sizeof(wchar_t));
+ wchar_t* dst = (wchar_t*)malloc_impl((len + 1) * sizeof(wchar_t));
if (dst)
wcsncpy(dst, src, len + 1);
return dst;
}
MOZ_MEMORY_API void*
_aligned_malloc(size_t size, size_t alignment)
{
--- a/memory/build/mozmemory_wrap.h
+++ b/memory/build/mozmemory_wrap.h
@@ -84,86 +84,86 @@
//
// Within libmozglue (when MOZ_MEMORY_IMPL is defined), all the functions
// should be suffixed with "_impl" both for declarations and use.
// That is, the implementation declaration for e.g. strdup would look like:
// char* strdup_impl(const char *)
// That implementation would call malloc by using "malloc_impl".
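//
// For example, with MOZ_MEMORY_IMPL defined on Windows, the defines below
// yield:
//   malloc_impl -> mozmem_malloc_impl(malloc) -> je_malloc
// while on other platforms mozmem_malloc_impl is the identity, so
// malloc_impl is plain malloc, declared with MOZ_MEMORY_API.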
#if defined(MOZ_MEMORY_IMPL) && !defined(IMPL_MFBT)
-# ifdef MFBT_API // mozilla/Types.h was already included
+#ifdef MFBT_API // mozilla/Types.h was already included
# error mozmemory_wrap.h has to be included before mozilla/Types.h when MOZ_MEMORY_IMPL is set and IMPL_MFBT is not.
-# endif
-# define IMPL_MFBT
+#endif
+#define IMPL_MFBT
#endif
#include "mozilla/Types.h"
#ifndef MOZ_EXTERN_C
#ifdef __cplusplus
#define MOZ_EXTERN_C extern "C"
#else
#define MOZ_EXTERN_C
#endif
#endif
#ifdef MOZ_MEMORY_IMPL
-# define MOZ_JEMALLOC_API MOZ_EXTERN_C MFBT_API
-# if defined(XP_WIN)
-# define mozmem_malloc_impl(a) je_ ## a
-# else
-# define MOZ_MEMORY_API MOZ_EXTERN_C MFBT_API
-# if defined(MOZ_WIDGET_ANDROID)
-# define MOZ_WRAP_NEW_DELETE
-# endif
-# endif
+#define MOZ_JEMALLOC_API MOZ_EXTERN_C MFBT_API
+#if defined(XP_WIN)
+#define mozmem_malloc_impl(a) je_##a
+#else
+#define MOZ_MEMORY_API MOZ_EXTERN_C MFBT_API
+#if defined(MOZ_WIDGET_ANDROID)
+#define MOZ_WRAP_NEW_DELETE
+#endif
+#endif
#endif
#ifdef XP_WIN
-# define mozmem_dup_impl(a) wrap_ ## a
+#define mozmem_dup_impl(a) wrap_##a
#endif
#if !defined(MOZ_MEMORY_IMPL)
-# define MOZ_MEMORY_API MOZ_EXTERN_C MFBT_API
-# define MOZ_JEMALLOC_API MOZ_EXTERN_C MFBT_API
+#define MOZ_MEMORY_API MOZ_EXTERN_C MFBT_API
+#define MOZ_JEMALLOC_API MOZ_EXTERN_C MFBT_API
#endif
#ifndef MOZ_MEMORY_API
-# define MOZ_MEMORY_API MOZ_EXTERN_C
+#define MOZ_MEMORY_API MOZ_EXTERN_C
#endif
#ifndef MOZ_JEMALLOC_API
-# define MOZ_JEMALLOC_API MOZ_EXTERN_C
+#define MOZ_JEMALLOC_API MOZ_EXTERN_C
#endif
#ifndef mozmem_malloc_impl
-# define mozmem_malloc_impl(a) a
+#define mozmem_malloc_impl(a) a
#endif
#ifndef mozmem_dup_impl
-# define mozmem_dup_impl(a) a
+#define mozmem_dup_impl(a) a
#endif
// Malloc implementation functions
-#define malloc_impl mozmem_malloc_impl(malloc)
-#define posix_memalign_impl mozmem_malloc_impl(posix_memalign)
-#define aligned_alloc_impl mozmem_malloc_impl(aligned_alloc)
-#define calloc_impl mozmem_malloc_impl(calloc)
-#define realloc_impl mozmem_malloc_impl(realloc)
-#define free_impl mozmem_malloc_impl(free)
-#define memalign_impl mozmem_malloc_impl(memalign)
-#define valloc_impl mozmem_malloc_impl(valloc)
-#define malloc_usable_size_impl mozmem_malloc_impl(malloc_usable_size)
-#define malloc_good_size_impl mozmem_malloc_impl(malloc_good_size)
+#define malloc_impl mozmem_malloc_impl(malloc)
+#define posix_memalign_impl mozmem_malloc_impl(posix_memalign)
+#define aligned_alloc_impl mozmem_malloc_impl(aligned_alloc)
+#define calloc_impl mozmem_malloc_impl(calloc)
+#define realloc_impl mozmem_malloc_impl(realloc)
+#define free_impl mozmem_malloc_impl(free)
+#define memalign_impl mozmem_malloc_impl(memalign)
+#define valloc_impl mozmem_malloc_impl(valloc)
+#define malloc_usable_size_impl mozmem_malloc_impl(malloc_usable_size)
+#define malloc_good_size_impl mozmem_malloc_impl(malloc_good_size)
// Duplication functions
-#define strndup_impl mozmem_dup_impl(strndup)
-#define strdup_impl mozmem_dup_impl(strdup)
+#define strndup_impl mozmem_dup_impl(strndup)
+#define strdup_impl mozmem_dup_impl(strdup)
#ifdef XP_WIN
-# define wcsdup_impl mozmem_dup_impl(wcsdup)
+#define wcsdup_impl mozmem_dup_impl(wcsdup)
#endif
// String functions
#ifdef ANDROID
// Bug 801571 and Bug 879668: libstagefright uses vasprintf, causing malloc()/
// free() to be mismatched between the bionic and mozglue implementations.
-#define vasprintf_impl mozmem_dup_impl(vasprintf)
-#define asprintf_impl mozmem_dup_impl(asprintf)
+#define vasprintf_impl mozmem_dup_impl(vasprintf)
+#define asprintf_impl mozmem_dup_impl(asprintf)
#endif
#endif // mozmemory_wrap_h
--- a/memory/build/rb.h
+++ b/memory/build/rb.h
@@ -71,60 +71,49 @@
enum NodeColor
{
Black = 0,
Red = 1,
};
// Node structure.
-template <typename T>
+template<typename T>
class RedBlackTreeNode
{
T* mLeft;
// The lowest bit is the color
T* mRightAndColor;
public:
- T* Left()
- {
- return mLeft;
- }
+ T* Left() { return mLeft; }
- void SetLeft(T* aValue)
- {
- mLeft = aValue;
- }
+ void SetLeft(T* aValue) { mLeft = aValue; }
T* Right()
{
- return reinterpret_cast<T*>(
- reinterpret_cast<uintptr_t>(mRightAndColor) & uintptr_t(~1));
+ return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(mRightAndColor) &
+ uintptr_t(~1));
}
void SetRight(T* aValue)
{
mRightAndColor = reinterpret_cast<T*>(
(reinterpret_cast<uintptr_t>(aValue) & uintptr_t(~1)) | Color());
}
NodeColor Color()
{
- return static_cast<NodeColor>(reinterpret_cast<uintptr_t>(mRightAndColor) & 1);
+ return static_cast<NodeColor>(reinterpret_cast<uintptr_t>(mRightAndColor) &
+ 1);
}
- bool IsBlack()
- {
- return Color() == NodeColor::Black;
- }
+ bool IsBlack() { return Color() == NodeColor::Black; }
- bool IsRed()
- {
- return Color() == NodeColor::Red;
- }
+ bool IsRed() { return Color() == NodeColor::Red; }
void SetColor(NodeColor aColor)
{
mRightAndColor = reinterpret_cast<T*>(
(reinterpret_cast<uintptr_t>(mRightAndColor) & uintptr_t(~1)) | aColor);
}
};
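// The color fits in the low bit of mRightAndColor because nodes are at least
// pointer-aligned. For example, with a right child at address 0x1000:
//   SetColor(NodeColor::Red)   -> mRightAndColor == 0x1001
//   SetColor(NodeColor::Black) -> mRightAndColor == 0x1000
//   Right()                    -> 0x1000 in both cases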
@@ -140,86 +129,50 @@ public:
return First(reinterpret_cast<TreeNode*>(aStart));
}
T* Last(T* aStart = nullptr)
{
return Last(reinterpret_cast<TreeNode*>(aStart));
}
- T* Next(T* aNode)
- {
- return Next(reinterpret_cast<TreeNode*>(aNode));
- }
+ T* Next(T* aNode) { return Next(reinterpret_cast<TreeNode*>(aNode)); }
- T* Prev(T* aNode)
- {
- return Prev(reinterpret_cast<TreeNode*>(aNode));
- }
+ T* Prev(T* aNode) { return Prev(reinterpret_cast<TreeNode*>(aNode)); }
- T* Search(T* aKey)
- {
- return Search(reinterpret_cast<TreeNode*>(aKey));
- }
+ T* Search(T* aKey) { return Search(reinterpret_cast<TreeNode*>(aKey)); }
// Find a match if it exists. Otherwise, find the next greater node, if one
// exists.
T* SearchOrNext(T* aKey)
{
return SearchOrNext(reinterpret_cast<TreeNode*>(aKey));
}
- void Insert(T* aNode)
- {
- Insert(reinterpret_cast<TreeNode*>(aNode));
- }
+ void Insert(T* aNode) { Insert(reinterpret_cast<TreeNode*>(aNode)); }
- void Remove(T* aNode)
- {
- return Remove(reinterpret_cast<TreeNode*>(aNode));
- }
+ void Remove(T* aNode) { return Remove(reinterpret_cast<TreeNode*>(aNode)); }
// Helper class to avoid having all the tree traversal code further below
// have to use Trait::GetTreeNode, adding visual noise.
struct TreeNode : public T
{
- TreeNode* Left()
- {
- return (TreeNode*)Trait::GetTreeNode(this).Left();
- }
+ TreeNode* Left() { return (TreeNode*)Trait::GetTreeNode(this).Left(); }
- void SetLeft(T* aValue)
- {
- Trait::GetTreeNode(this).SetLeft(aValue);
- }
+ void SetLeft(T* aValue) { Trait::GetTreeNode(this).SetLeft(aValue); }
- TreeNode* Right()
- {
- return (TreeNode*)Trait::GetTreeNode(this).Right();
- }
+ TreeNode* Right() { return (TreeNode*)Trait::GetTreeNode(this).Right(); }
- void SetRight(T* aValue)
- {
- Trait::GetTreeNode(this).SetRight(aValue);
- }
+ void SetRight(T* aValue) { Trait::GetTreeNode(this).SetRight(aValue); }
- NodeColor Color()
- {
- return Trait::GetTreeNode(this).Color();
- }
+ NodeColor Color() { return Trait::GetTreeNode(this).Color(); }
- bool IsRed()
- {
- return Trait::GetTreeNode(this).IsRed();
- }
+ bool IsRed() { return Trait::GetTreeNode(this).IsRed(); }
- bool IsBlack()
- {
- return Trait::GetTreeNode(this).IsBlack();
- }
+ bool IsBlack() { return Trait::GetTreeNode(this).IsBlack(); }
void SetColor(NodeColor aColor)
{
Trait::GetTreeNode(this).SetColor(aColor);
}
};
private:
@@ -690,17 +643,16 @@ private:
// current path. This is critical to performance, since a series of calls to
// rb_{next,prev}() would require time proportional to (n lg n), whereas this
// implementation only requires time proportional to (n).
//
// Since the iterator caches a path down the tree, any tree modification may
// cause the cached path to become invalid. Don't modify the tree during an
// iteration.
-
// Size the path arrays such that they are always large enough, even if a
// tree consumes all of memory. Since each node must contain a minimum of
// two pointers, there can never be more nodes than:
//
// 1 << ((sizeof(void*)<<3) - (log2(sizeof(void*))+1))
//
// Since the depth of a tree is limited to 3*lg(#nodes), the maximum depth
// is:
@@ -734,17 +686,18 @@ public:
{
Iterator* mIterator;
T* mItem;
public:
Item(Iterator* aIterator, T* aItem)
: mIterator(aIterator)
, mItem(aItem)
- { }
+ {
+ }
bool operator!=(const Item& aOther) const
{
return (mIterator != aOther.mIterator) || (mItem != aOther.mItem);
}
T* operator*() const { return mItem; }
@@ -755,20 +708,17 @@ public:
}
};
Item<Iterator> begin()
{
return Item<Iterator>(this, mDepth > 0 ? mPath[mDepth - 1] : nullptr);
}
- Item<Iterator> end()
- {
- return Item<Iterator>(this, nullptr);
- }
+ Item<Iterator> end() { return Item<Iterator>(this, nullptr); }
TreeNode* Next()
{
TreeNode* node;
if ((node = mPath[mDepth - 1]->Right())) {
// The successor is the left-most node in the right subtree.
mPath[mDepth++] = node;
while ((node = mPath[mDepth - 1]->Left())) {
--- a/memory/build/replace_malloc.h
+++ b/memory/build/replace_malloc.h
@@ -74,23 +74,23 @@
MOZ_BEGIN_EXTERN_C
// MOZ_NO_REPLACE_FUNC_DECL and MOZ_REPLACE_WEAK are only defined in
// replace_malloc.c. Normally including this header will add function
// declarations.
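// For example, for malloc, the declaration below expands to (roughly):
//   MOZ_EXPORT void* replace_malloc(size_t) MOZ_REPLACE_WEAK;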
#ifndef MOZ_NO_REPLACE_FUNC_DECL
-# ifndef MOZ_REPLACE_WEAK
-# define MOZ_REPLACE_WEAK
-# endif
+#ifndef MOZ_REPLACE_WEAK
+#define MOZ_REPLACE_WEAK
+#endif
-# define MALLOC_DECL(name, return_type, ...) \
- MOZ_EXPORT return_type replace_ ## name(__VA_ARGS__) MOZ_REPLACE_WEAK;
+#define MALLOC_DECL(name, return_type, ...) \
+ MOZ_EXPORT return_type replace_##name(__VA_ARGS__) MOZ_REPLACE_WEAK;
-# define MALLOC_FUNCS MALLOC_FUNCS_ALL
-# include "malloc_decls.h"
+#define MALLOC_FUNCS MALLOC_FUNCS_ALL
+#include "malloc_decls.h"
#endif // MOZ_NO_REPLACE_FUNC_DECL
MOZ_END_EXTERN_C
#endif // replace_malloc_h
--- a/memory/build/replace_malloc_bridge.h
+++ b/memory/build/replace_malloc_bridge.h
@@ -47,31 +47,32 @@
struct ReplaceMallocBridge;
#include "mozilla/Types.h"
MOZ_BEGIN_EXTERN_C
#ifndef REPLACE_MALLOC_IMPL
// Returns the replace-malloc bridge if there is one to be returned.
-MFBT_API ReplaceMallocBridge* get_bridge();
+MFBT_API ReplaceMallocBridge*
+get_bridge();
#endif
// Table of malloc functions.
// e.g. void* (*malloc)(size_t), etc.
-#define MALLOC_DECL(name, return_type, ...) \
- typedef return_type(name ## _impl_t)(__VA_ARGS__);
+#define MALLOC_DECL(name, return_type, ...) \
+ typedef return_type(name##_impl_t)(__VA_ARGS__);
#include "malloc_decls.h"
-#define MALLOC_DECL(name, return_type, ...) \
- name ## _impl_t * name;
+#define MALLOC_DECL(name, return_type, ...) name##_impl_t* name;
-typedef struct {
+typedef struct
+{
#include "malloc_decls.h"
} malloc_table_t;
MOZ_END_EXTERN_C
#ifdef __cplusplus
// Table of malloc hook functions.
@@ -82,35 +83,37 @@ MOZ_END_EXTERN_C
// returning it.
// The hooks corresponding to functions, like free(void*), that return no
// value, don't take an extra argument.
// The table must at least contain a pointer for malloc_hook and free_hook
// functions. They will be used as fallback if no pointer is given for
// other allocation functions, like calloc_hook.
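// For example, the hook for malloc has type void* (*)(void*, size_t): it
// receives malloc's return value followed by the requested size, and returns
// the (possibly substituted) value to hand back to the caller. The hook for
// free, which returns no value, has type void (*)(void*).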
namespace mozilla {
namespace detail {
-template <typename R, typename... Args>
-struct AllocHookType {
+template<typename R, typename... Args>
+struct AllocHookType
+{
using Type = R (*)(R, Args...);
};
-template <typename... Args>
+template<typename... Args>
struct AllocHookType<void, Args...>
{
using Type = void (*)(Args...);
};
} // namespace detail
} // namespace mozilla
-#define MALLOC_DECL(name, return_type, ...) \
- typename mozilla::detail::AllocHookType<return_type, ##__VA_ARGS__>::Type \
- name ## _hook;
+#define MALLOC_DECL(name, return_type, ...) \
+ typename mozilla::detail::AllocHookType<return_type, ##__VA_ARGS__>::Type \
+ name##_hook;
-typedef struct {
+typedef struct
+{
#include "malloc_decls.h"
// Like free_hook, but called before realloc_hook. free_hook is called
// instead if not given.
void (*realloc_hook_before)(void* aPtr);
} malloc_hook_table_t;
namespace mozilla {
namespace dmd {
@@ -125,17 +128,20 @@ struct DebugFdRegistry
virtual void UnRegisterHandle(intptr_t aFd);
};
} // namespace mozilla
struct ReplaceMallocBridge
{
- ReplaceMallocBridge() : mVersion(3) {}
+ ReplaceMallocBridge()
+ : mVersion(3)
+ {
+ }
// This method was added in version 1 of the bridge.
virtual mozilla::dmd::DMDFuncs* GetDMDFuncs() { return nullptr; }
// Send a DebugFdRegistry instance to the replace-malloc library so that
// it can register/unregister file descriptors whenever needed. The
// instance is valid until the process dies.
// This method was added in version 2 of the bridge.
@@ -149,27 +155,32 @@ struct ReplaceMallocBridge
// registered table under the same name.
// Returns nullptr if registration failed.
// If registration succeeded, a table of "pure" malloc functions is
// returned. Those "pure" malloc functions won't call hooks.
// /!\ Do not rely on registration/unregistration to be instantaneous.
// Functions from a previously registered table may still be called for
// a brief time after RegisterHook returns.
// This method was added in version 3 of the bridge.
- virtual const malloc_table_t*
- RegisterHook(const char* aName, const malloc_table_t* aTable,
- const malloc_hook_table_t* aHookTable) { return nullptr; }
+ virtual const malloc_table_t* RegisterHook(
+ const char* aName,
+ const malloc_table_t* aTable,
+ const malloc_hook_table_t* aHookTable)
+ {
+ return nullptr;
+ }
#ifndef REPLACE_MALLOC_IMPL
// Returns the replace-malloc bridge if its version is at least the
// requested one.
- static ReplaceMallocBridge* Get(int aMinimumVersion) {
+ static ReplaceMallocBridge* Get(int aMinimumVersion)
+ {
static ReplaceMallocBridge* sSingleton = get_bridge();
- return (sSingleton && sSingleton->mVersion >= aMinimumVersion)
- ? sSingleton : nullptr;
+ return (sSingleton && sSingleton->mVersion >= aMinimumVersion) ? sSingleton
+ : nullptr;
}
#endif
protected:
const int mVersion;
};
#ifndef REPLACE_MALLOC_IMPL
@@ -192,19 +203,20 @@ struct ReplaceMalloc
static void InitDebugFd(mozilla::DebugFdRegistry& aRegistry)
{
auto singleton = ReplaceMallocBridge::Get(/* minimumVersion */ 2);
if (singleton) {
singleton->InitDebugFd(aRegistry);
}
}
- static const malloc_table_t*
- RegisterHook(const char* aName, const malloc_table_t* aTable,
- const malloc_hook_table_t* aHookTable)
+ static const malloc_table_t* RegisterHook(
+ const char* aName,
+ const malloc_table_t* aTable,
+ const malloc_hook_table_t* aHookTable)
{
auto singleton = ReplaceMallocBridge::Get(/* minimumVersion */ 3);
return singleton ? singleton->RegisterHook(aName, aTable, aHookTable)
: nullptr;
}
};
#endif
--- a/memory/build/zone.c
+++ b/memory/build/zone.c
@@ -8,129 +8,148 @@
#include <stdlib.h>
#include <mach/mach_types.h>
#include "mozilla/Assertions.h"
// Malloc implementation functions are MOZ_MEMORY_API, and jemalloc
// specific functions MOZ_JEMALLOC_API; see mozmemory_wrap.h
-#define MALLOC_DECL(name, return_type, ...) \
- MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
+#define MALLOC_DECL(name, return_type, ...) \
+ MOZ_MEMORY_API return_type name##_impl(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
#include "malloc_decls.h"
-#define MALLOC_DECL(name, return_type, ...) \
- MOZ_JEMALLOC_API return_type name ## _impl(__VA_ARGS__);
+#define MALLOC_DECL(name, return_type, ...) \
+ MOZ_JEMALLOC_API return_type name##_impl(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_JEMALLOC
#include "malloc_decls.h"
// Definitions of the following structs in malloc/malloc.h might be too old
// for the built binary to run on newer versions of OSX. So use the newest
// possible version of those structs.
-typedef struct _malloc_zone_t {
- void *reserved1;
- void *reserved2;
- size_t (*size)(struct _malloc_zone_t *, const void *);
- void *(*malloc)(struct _malloc_zone_t *, size_t);
- void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
- void *(*valloc)(struct _malloc_zone_t *, size_t);
- void (*free)(struct _malloc_zone_t *, void *);
- void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
- void (*destroy)(struct _malloc_zone_t *);
- const char *zone_name;
- unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
- void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
- struct malloc_introspection_t *introspect;
+typedef struct _malloc_zone_t
+{
+ void* reserved1;
+ void* reserved2;
+ size_t (*size)(struct _malloc_zone_t*, const void*);
+ void* (*malloc)(struct _malloc_zone_t*, size_t);
+ void* (*calloc)(struct _malloc_zone_t*, size_t, size_t);
+ void* (*valloc)(struct _malloc_zone_t*, size_t);
+ void (*free)(struct _malloc_zone_t*, void*);
+ void* (*realloc)(struct _malloc_zone_t*, void*, size_t);
+ void (*destroy)(struct _malloc_zone_t*);
+ const char* zone_name;
+ unsigned (*batch_malloc)(struct _malloc_zone_t*, size_t, void**, unsigned);
+ void (*batch_free)(struct _malloc_zone_t*, void**, unsigned);
+ struct malloc_introspection_t* introspect;
unsigned version;
- void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
- void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
- size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
+ void* (*memalign)(struct _malloc_zone_t*, size_t, size_t);
+ void (*free_definite_size)(struct _malloc_zone_t*, void*, size_t);
+ size_t (*pressure_relief)(struct _malloc_zone_t*, size_t);
} malloc_zone_t;
-typedef struct {
+typedef struct
+{
vm_address_t address;
vm_size_t size;
} vm_range_t;
-typedef struct malloc_statistics_t {
+typedef struct malloc_statistics_t
+{
unsigned blocks_in_use;
size_t size_in_use;
size_t max_size_in_use;
size_t size_allocated;
} malloc_statistics_t;
-typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);
+typedef kern_return_t
+memory_reader_t(task_t, vm_address_t, vm_size_t, void**);
-typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
+typedef void
+vm_range_recorder_t(task_t, void*, unsigned type, vm_range_t*, unsigned);
-typedef struct malloc_introspection_t {
- kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
- size_t (*good_size)(malloc_zone_t *, size_t);
- boolean_t (*check)(malloc_zone_t *);
- void (*print)(malloc_zone_t *, boolean_t);
- void (*log)(malloc_zone_t *, void *);
- void (*force_lock)(malloc_zone_t *);
- void (*force_unlock)(malloc_zone_t *);
- void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
- boolean_t (*zone_locked)(malloc_zone_t *);
- boolean_t (*enable_discharge_checking)(malloc_zone_t *);
- boolean_t (*disable_discharge_checking)(malloc_zone_t *);
- void (*discharge)(malloc_zone_t *, void *);
+typedef struct malloc_introspection_t
+{
+ kern_return_t (*enumerator)(task_t,
+ void*,
+ unsigned,
+ vm_address_t,
+ memory_reader_t,
+ vm_range_recorder_t);
+ size_t (*good_size)(malloc_zone_t*, size_t);
+ boolean_t (*check)(malloc_zone_t*);
+ void (*print)(malloc_zone_t*, boolean_t);
+ void (*log)(malloc_zone_t*, void*);
+ void (*force_lock)(malloc_zone_t*);
+ void (*force_unlock)(malloc_zone_t*);
+ void (*statistics)(malloc_zone_t*, malloc_statistics_t*);
+ boolean_t (*zone_locked)(malloc_zone_t*);
+ boolean_t (*enable_discharge_checking)(malloc_zone_t*);
+ boolean_t (*disable_discharge_checking)(malloc_zone_t*);
+ void (*discharge)(malloc_zone_t*, void*);
#ifdef __BLOCKS__
- void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
+ void (*enumerate_discharged_pointers)(malloc_zone_t*, void (^)(void*, void*));
#else
- void *enumerate_unavailable_without_blocks;
+ void* enumerate_unavailable_without_blocks;
#endif
- void (*reinit_lock)(malloc_zone_t *);
+ void (*reinit_lock)(malloc_zone_t*);
} malloc_introspection_t;
-extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);
+extern kern_return_t
+malloc_get_all_zones(task_t, memory_reader_t, vm_address_t**, unsigned*);
-extern malloc_zone_t *malloc_default_zone(void);
+extern malloc_zone_t*
+malloc_default_zone(void);
-extern void malloc_zone_register(malloc_zone_t *zone);
+extern void
+malloc_zone_register(malloc_zone_t* zone);
-extern void malloc_zone_unregister(malloc_zone_t *zone);
+extern void
+malloc_zone_unregister(malloc_zone_t* zone);
-extern malloc_zone_t *malloc_default_purgeable_zone(void);
+extern malloc_zone_t*
+malloc_default_purgeable_zone(void);
-extern malloc_zone_t* malloc_zone_from_ptr(const void* ptr);
+extern malloc_zone_t*
+malloc_zone_from_ptr(const void* ptr);
-extern void malloc_zone_free(malloc_zone_t* zone, void* ptr);
+extern void
+malloc_zone_free(malloc_zone_t* zone, void* ptr);
-extern void* malloc_zone_realloc(malloc_zone_t* zone, void* ptr, size_t size);
+extern void*
+malloc_zone_realloc(malloc_zone_t* zone, void* ptr, size_t size);
// The following is an OSX zone allocator implementation.
// /!\ WARNING. It assumes the underlying malloc implementation's
// malloc_usable_size returns 0 when the given pointer is not owned by
// the allocator. Sadly, OSX does call zone_size with pointers not
// owned by the allocator.
static size_t
-zone_size(malloc_zone_t *zone, const void *ptr)
+zone_size(malloc_zone_t* zone, const void* ptr)
{
return malloc_usable_size_impl(ptr);
}
-static void *
-zone_malloc(malloc_zone_t *zone, size_t size)
+static void*
+zone_malloc(malloc_zone_t* zone, size_t size)
{
return malloc_impl(size);
}
-static void *
-zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
+static void*
+zone_calloc(malloc_zone_t* zone, size_t num, size_t size)
{
return calloc_impl(num, size);
}
-static void *
-zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
+static void*
+zone_realloc(malloc_zone_t* zone, void* ptr, size_t size)
{
if (malloc_usable_size_impl(ptr))
return realloc_impl(ptr, size);
// Sometimes, system libraries call malloc_zone_* functions with the wrong
// zone (e.g. CoreFoundation does). In that case, we need to find the real
// one. We can't call libSystem's realloc directly because we're exporting
// realloc from libmozglue and we'd pick that one, so we manually find the
@@ -158,199 +177,206 @@ other_zone_free(malloc_zone_t* original_
// The system allocator crashes voluntarily by default when a pointer can't
// be traced back to a zone. Do the same.
MOZ_RELEASE_ASSERT(zone);
MOZ_RELEASE_ASSERT(zone != original_zone);
return malloc_zone_free(zone, ptr);
}
static void
-zone_free(malloc_zone_t *zone, void *ptr)
+zone_free(malloc_zone_t* zone, void* ptr)
{
if (malloc_usable_size_impl(ptr)) {
free_impl(ptr);
return;
}
other_zone_free(zone, ptr);
}
static void
-zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
+zone_free_definite_size(malloc_zone_t* zone, void* ptr, size_t size)
{
size_t current_size = malloc_usable_size_impl(ptr);
if (current_size) {
MOZ_ASSERT(current_size == size);
free_impl(ptr);
return;
}
other_zone_free(zone, ptr);
}
-static void *
-zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
+static void*
+zone_memalign(malloc_zone_t* zone, size_t alignment, size_t size)
{
- void *ptr;
+ void* ptr;
if (posix_memalign_impl(&ptr, alignment, size) == 0)
return ptr;
return NULL;
}
-static void *
-zone_valloc(malloc_zone_t *zone, size_t size)
+static void*
+zone_valloc(malloc_zone_t* zone, size_t size)
{
return valloc_impl(size);
}
static void
-zone_destroy(malloc_zone_t *zone)
+zone_destroy(malloc_zone_t* zone)
{
// This function should never be called.
MOZ_CRASH();
}
static unsigned
-zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results,
- unsigned num_requested)
+zone_batch_malloc(malloc_zone_t* zone,
+ size_t size,
+ void** results,
+ unsigned num_requested)
{
unsigned i;
for (i = 0; i < num_requested; i++) {
results[i] = malloc_impl(size);
if (!results[i])
break;
}
return i;
}
static void
-zone_batch_free(malloc_zone_t *zone, void **to_be_freed,
- unsigned num_to_be_freed)
+zone_batch_free(malloc_zone_t* zone,
+ void** to_be_freed,
+ unsigned num_to_be_freed)
{
unsigned i;
for (i = 0; i < num_to_be_freed; i++) {
zone_free(zone, to_be_freed[i]);
to_be_freed[i] = NULL;
}
}
static size_t
-zone_pressure_relief(malloc_zone_t *zone, size_t goal)
+zone_pressure_relief(malloc_zone_t* zone, size_t goal)
{
return 0;
}
static size_t
-zone_good_size(malloc_zone_t *zone, size_t size)
+zone_good_size(malloc_zone_t* zone, size_t size)
{
return malloc_good_size_impl(size);
}
static kern_return_t
-zone_enumerator(task_t task, void *data, unsigned type_mask,
- vm_address_t zone_address, memory_reader_t reader,
- vm_range_recorder_t recorder)
+zone_enumerator(task_t task,
+ void* data,
+ unsigned type_mask,
+ vm_address_t zone_address,
+ memory_reader_t reader,
+ vm_range_recorder_t recorder)
{
return KERN_SUCCESS;
}
static boolean_t
-zone_check(malloc_zone_t *zone)
+zone_check(malloc_zone_t* zone)
{
return true;
}
static void
-zone_print(malloc_zone_t *zone, boolean_t verbose)
+zone_print(malloc_zone_t* zone, boolean_t verbose)
{
}
static void
-zone_log(malloc_zone_t *zone, void *address)
+zone_log(malloc_zone_t* zone, void* address)
{
}
-extern void _malloc_prefork(void);
-extern void _malloc_postfork_child(void);
+extern void
+_malloc_prefork(void);
+extern void
+_malloc_postfork_child(void);
static void
-zone_force_lock(malloc_zone_t *zone)
+zone_force_lock(malloc_zone_t* zone)
{
// /!\ This calls into mozjemalloc. It works because we're linked in the
// same library.
_malloc_prefork();
}
static void
-zone_force_unlock(malloc_zone_t *zone)
+zone_force_unlock(malloc_zone_t* zone)
{
// /!\ This calls into mozjemalloc. It works because we're linked in the
// same library.
_malloc_postfork_child();
}
static void
-zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats)
+zone_statistics(malloc_zone_t* zone, malloc_statistics_t* stats)
{
// We make no effort to actually fill the values
stats->blocks_in_use = 0;
stats->size_in_use = 0;
stats->max_size_in_use = 0;
stats->size_allocated = 0;
}
static boolean_t
-zone_locked(malloc_zone_t *zone)
+zone_locked(malloc_zone_t* zone)
{
// Pretend no lock is being held
return false;
}
static void
-zone_reinit_lock(malloc_zone_t *zone)
+zone_reinit_lock(malloc_zone_t* zone)
{
// As of OSX 10.12, this function is only used when force_unlock would
// be used if the zone version were < 9. So just use force_unlock.
zone_force_unlock(zone);
}
static malloc_zone_t zone;
static struct malloc_introspection_t zone_introspect;
-static malloc_zone_t *get_default_zone()
+static malloc_zone_t*
+get_default_zone()
{
- malloc_zone_t **zones = NULL;
+ malloc_zone_t** zones = NULL;
unsigned int num_zones = 0;
// On OSX 10.12, malloc_default_zone returns a special zone that is not
// present in the list of registered zones. That zone uses a "lite zone"
// if one is present (apparently enabled when malloc stack logging is
// enabled), or the first registered zone otherwise. In practice this
// means unless malloc stack logging is enabled, the first registered
// zone is the default.
// So get the list of zones to get the first one, instead of relying on
// malloc_default_zone.
- if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, (vm_address_t**) &zones,
- &num_zones)) {
+ if (KERN_SUCCESS !=
+ malloc_get_all_zones(0, NULL, (vm_address_t**)&zones, &num_zones)) {
// Reset the value in case the failure happened after it was set.
num_zones = 0;
}
if (num_zones) {
return zones[0];
}
return malloc_default_zone();
}
-
- __attribute__((constructor))
-static void
+__attribute__((constructor)) static void
register_zone(void)
{
- malloc_zone_t *default_zone = get_default_zone();
+ malloc_zone_t* default_zone = get_default_zone();
zone.size = zone_size;
zone.malloc = zone_malloc;
zone.calloc = zone_calloc;
zone.valloc = zone_valloc;
zone.free = zone_free;
zone.realloc = zone_realloc;
zone.destroy = zone_destroy;
@@ -387,17 +413,17 @@ register_zone(void)
// The default purgeable zone is created lazily by OSX's libc. It uses
// the default zone when it is created for "small" allocations
// (< 15 KiB), but assumes the default zone is a scalable_zone. This
// obviously fails when the default zone is the jemalloc zone, so
// malloc_default_purgeable_zone is called beforehand so that the
// default purgeable zone is created when the default zone is still
// a scalable_zone.
- malloc_zone_t *purgeable_zone = malloc_default_purgeable_zone();
+ malloc_zone_t* purgeable_zone = malloc_default_purgeable_zone();
// Register the custom zone. At this point it won't be the default.
malloc_zone_register(&zone);
do {
// Unregister and reregister the default zone. On OSX >= 10.6,
// unregistering takes the last registered zone and places it at the
// location of the specified zone. Unregistering the default zone thus