--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -166,32 +166,32 @@
/* use MSVC intrinsics */
#pragma intrinsic(_BitScanForward)
static __forceinline int
ffs(int x)
{
unsigned long i;
if (_BitScanForward(&i, x) != 0) {
- return (i + 1);
+ return i + 1;
}
- return (0);
+ return 0;
}

/* Implement getenv without using malloc */
static char mozillaMallocOptionsBuf[64];
#define getenv xgetenv
static char *
getenv(const char *name)
{
if (GetEnvironmentVariableA(name, mozillaMallocOptionsBuf,
sizeof(mozillaMallocOptionsBuf)) > 0) {
- return (mozillaMallocOptionsBuf);
+ return mozillaMallocOptionsBuf;
}
return nullptr;
}

#if defined(_WIN64)
typedef long long ssize_t;
#else
@@ -1346,24 +1346,24 @@ pow2_ceil(size_t x)
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
#if (SIZEOF_PTR == 8)
x |= x >> 32;
#endif
x++;
- return (x);
+ return x;
}

static inline const char *
_getprogname(void)
{
- return ("<jemalloc>");
+ return "<jemalloc>";
}
/******************************************************************************/
static inline void
pages_decommit(void* aAddr, size_t aSize)
{
#ifdef XP_WIN
@@ -1430,17 +1430,17 @@ base_pages_alloc(size_t minsize)
{
size_t csize;
size_t pminsize;
MOZ_ASSERT(minsize != 0);
csize = CHUNK_CEILING(minsize);
base_pages = chunk_alloc(csize, chunksize, true);
if (!base_pages) {
- return (true);
+ return true;
}
base_next_addr = base_pages;
base_past_addr = (void *)((uintptr_t)base_pages + csize);
/*
* Leave enough pages for minsize committed, since otherwise they would
* have to be immediately recommitted.
*/
pminsize = PAGE_CEILING(minsize);
@@ -1448,17 +1448,17 @@ base_pages_alloc(size_t minsize)
# if defined(MALLOC_DECOMMIT)
if (pminsize < csize) {
pages_decommit(base_next_decommitted, csize - pminsize);
}
# endif
base_mapped += csize;
base_committed += pminsize;
- return (false);
+ return false;
}

static void*
base_alloc(size_t aSize)
{
void* ret;
size_t csize;
@@ -1514,17 +1514,17 @@ base_node_alloc(void)
ret = base_nodes;
base_nodes = *(extent_node_t **)ret;
base_mtx.Unlock();
} else {
base_mtx.Unlock();
ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
}
- return (ret);
+ return ret;
}

static void
base_node_dealloc(extent_node_t* aNode)
{
MutexAutoLock lock(base_mtx);
*(extent_node_t**)aNode = base_nodes;
base_nodes = aNode;
@@ -1548,17 +1548,17 @@ using UniqueBaseNode = mozilla::UniquePt
#ifdef XP_WIN
static void *
pages_map(void *addr, size_t size)
{
void *ret = nullptr;
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
- return (ret);
+ return ret;
}

static void
pages_unmap(void *addr, size_t size)
{
if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
_malloc_message(_getprogname(),
": (malloc) Error in VirtualFree()\n");
@@ -1657,17 +1657,17 @@ pages_map(void *addr, size_t size)
#if defined(__ia64__) || (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
MOZ_ASSERT(!ret || (!check_placement && ret)
|| (check_placement && ret == addr));
#else
MOZ_ASSERT(!ret || (!addr && ret != addr)
|| (addr && ret == addr));
#endif
- return (ret);
+ return ret;
}

static void
pages_unmap(void *addr, size_t size)
{
if (munmap(addr, size) == -1) {
char buf[STRERROR_BUF];
@@ -1808,34 +1808,34 @@ pages_trim(void *addr, size_t alloc_size
MOZ_ASSERT(alloc_size >= leadsize + size);
#ifdef XP_WIN
{
void *new_addr;
pages_unmap(addr, alloc_size);
new_addr = pages_map(ret, size);
if (new_addr == ret) {
- return (ret);
+ return ret;
}
if (new_addr) {
pages_unmap(new_addr, size);
}
return nullptr;
}
#else
{
size_t trailsize = alloc_size - leadsize - size;
if (leadsize != 0) {
pages_unmap(addr, leadsize);
}
if (trailsize != 0) {
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
}
- return (ret);
+ return ret;
}
#endif
}

static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment)
{
void *ret, *pages;
@@ -1852,17 +1852,17 @@ chunk_alloc_mmap_slow(size_t size, size_
return nullptr;
}
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages;
ret = pages_trim(pages, alloc_size, leadsize, size);
} while (!ret);
MOZ_ASSERT(ret);
- return (ret);
+ return ret;
}

static void *
chunk_alloc_mmap(size_t size, size_t alignment)
{
void *ret;
size_t offset;
@@ -1881,21 +1881,21 @@ chunk_alloc_mmap(size_t size, size_t ali
ret = pages_map(nullptr, size);
if (!ret) {
return nullptr;
}
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
pages_unmap(ret, size);
- return (chunk_alloc_mmap_slow(size, alignment));
+ return chunk_alloc_mmap_slow(size, alignment);
}
MOZ_ASSERT(ret);
- return (ret);
+ return ret;
}

/* Purge and release the pages in the chunk of length `length` at `addr` to
* the OS.
* Returns whether the pages are guaranteed to be full of zeroes when the
* function returns.
* The force_zero argument explicitly requests that the memory is guaranteed
* to be full of zeroes when the function returns.
@@ -2261,17 +2261,17 @@ choose_arena(size_t size)
if (size <= small_max) {
ret = thread_arena.get();
}
if (!ret) {
ret = thread_local_arena(false);
}
MOZ_DIAGNOSTIC_ASSERT(ret);
- return (ret);
+ return ret;
}

static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
{
void *ret;
unsigned i, mask, bit, regind;
@@ -2293,17 +2293,17 @@ arena_run_reg_alloc(arena_run_t *run, ar
MOZ_ASSERT(regind < bin->nregs);
ret = (void *)(((uintptr_t)run) + bin->reg0_offset
+ (bin->reg_size * regind));
/* Clear bit. */
mask ^= (1U << bit);
run->regs_mask[i] = mask;
- return (ret);
+ return ret;
}
for (i++; i < bin->regs_mask_nelms; i++) {
mask = run->regs_mask[i];
if (mask != 0) {
/* Usable allocation found. */
bit = ffs((int)mask) - 1;
@@ -2317,17 +2317,17 @@ arena_run_reg_alloc(arena_run_t *run, ar
run->regs_mask[i] = mask;
/*
* Make a note that nothing before this element
* contains a free region.
*/
run->regs_minelm = i; /* Low payoff: + (mask == 0); */
- return (ret);
+ return ret;
}
}
/* Not reached. */
MOZ_DIAGNOSTIC_ASSERT(0);
return nullptr;
}

static inline void
@@ -3078,17 +3078,17 @@ arena_bin_run_size_calc(arena_bin_t *bin
MOZ_ASSERT((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
/* Copy final settings. */
bin->run_size = good_run_size;
bin->nregs = good_nregs;
bin->regs_mask_nelms = good_mask_nelms;
bin->reg0_offset = good_reg0_offset;
- return (good_run_size);
+ return good_run_size;
}

void*
arena_t::MallocSmall(size_t aSize, bool aZero)
{
void* ret;
arena_bin_t* bin;
arena_run_t* run;
@@ -3165,17 +3165,17 @@ arena_t::MallocLarge(size_t aSize, bool
if (aZero == false) {
if (opt_junk) {
memset(ret, kAllocJunk, aSize);
} else if (opt_zero) {
memset(ret, 0, aSize);
}
}
- return (ret);
+ return ret;
}

void*
arena_t::Malloc(size_t aSize, bool aZero)
{
MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
MOZ_ASSERT(aSize != 0);
MOZ_ASSERT(QUANTUM_CEILING(aSize) <= arena_maxclass);
@@ -3365,17 +3365,17 @@ arena_salloc(const void *ptr)
arena_run_t *run = (arena_run_t *)(mapbits & ~pagesize_mask);
MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
ret = run->bin->reg_size;
} else {
ret = mapbits & ~pagesize_mask;
MOZ_DIAGNOSTIC_ASSERT(ret != 0);
}
- return (ret);
+ return ret;
}

/*
* Validate ptr before assuming that it points to an allocation. Currently,
* the following validation is performed:
*
* + Check that ptr is not nullptr.
*
@@ -4212,20 +4212,20 @@ huge_dalloc(void* aPtr)
#if defined(XP_WIN)
#define malloc_init() false
#else
static inline bool
malloc_init(void)
{
if (malloc_initialized == false) {
- return (malloc_init_hard());
+ return malloc_init_hard();
}
- return (false);
+ return false;
}
#endif

static size_t
GetKernelPageSize()
{
static size_t kernel_page_size = ([]() {
#ifdef XP_WIN