Bug 1365460 - Replace literal 0xe4/0xe5 junk/poison values with constants. r?njn
author Mike Hommey <mh+mozilla@glandium.org>
Thu, 18 May 2017 10:02:36 +0900
changeset 580748 b0a67155ceb4baaf1d51ffe88a50bae05e672313
parent 580747 4f71522f701b32ee1ec82e2babf6a7386e5c6db8
child 580749 f9fb0357a5970311decd7bb45a7eccc716bda2a3
push id 59654
push user bmo:mh+mozilla@glandium.org
push date Thu, 18 May 2017 22:44:22 +0000
reviewers njn
bugs 1365460
milestone 55.0a1
Bug 1365460 - Replace literal 0xe4/0xe5 junk/poison values with constants. r?njn
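
For context: when junk filling is enabled, freshly allocated (non-zeroed)
memory is filled with the junk byte so reads of uninitialized data show a
recognizable pattern, and when poisoning is enabled, freed memory is
overwritten with the poison byte so use-after-free reads stand out. A
minimal sketch of the observable behavior, assuming the process is linked
against this mozjemalloc in a non-MALLOC_PRODUCTION build (where opt_junk
and opt_poison default to true); the snippet is illustrative and not part
of this patch:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    int main(void) {
      uint8_t *buf = (uint8_t *)malloc(16);
      /* Junk filling: every byte of a fresh, non-zeroed allocation is
       * set to kAllocJunk, i.e. 0xe4. */
      assert(buf[0] == 0xe4);
      free(buf);
      /* Poisoning: free() overwrites the region with kAllocPoison
       * (0xe5); reading buf now would still be a use-after-free bug,
       * but the pattern makes such bugs easy to spot in a debugger. */
      return 0;
    }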
memory/mozjemalloc/mozjemalloc.cpp
memory/mozjemalloc/mozjemalloc_types.h
--- a/memory/mozjemalloc/mozjemalloc.cpp
+++ b/memory/mozjemalloc/mozjemalloc.cpp
@@ -1072,16 +1072,21 @@ static __thread arena_t	*arenas_map;
 #endif
 
 /*******************************/
 /*
  * Runtime configuration options.
  */
 const char	*_malloc_options = MOZ_MALLOC_OPTIONS;
 
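+/* Junk fill pattern for freshly allocated memory (opt_junk). */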
+const uint8_t kAllocJunk = 0xe4;
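+/* Poison fill pattern for freed memory (opt_poison). */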
+const uint8_t kAllocPoison = 0xe5;
+
 #ifndef MALLOC_PRODUCTION
 static bool	opt_abort = true;
 static bool	opt_junk = true;
 static bool	opt_poison = true;
 static bool	opt_zero = false;
 #else
 static bool	opt_abort = false;
 static const bool	opt_junk = false;
@@ -3570,17 +3575,17 @@ arena_malloc_small(arena_t *arena, size_
 
 	bin->stats.nrequests++;
 	arena->stats.nmalloc_small++;
 	arena->stats.allocated_small += size;
 	malloc_spin_unlock(&arena->lock);
 
 	if (zero == false) {
 		if (opt_junk)
-			memset(ret, 0xe4, size);
+			memset(ret, kAllocJunk, size);
 		else if (opt_zero)
 			memset(ret, 0, size);
 	} else
 		memset(ret, 0, size);
 
 	return (ret);
 }
 
@@ -3598,17 +3603,17 @@ arena_malloc_large(arena_t *arena, size_
 		return (NULL);
 	}
 	arena->stats.nmalloc_large++;
 	arena->stats.allocated_large += size;
 	malloc_spin_unlock(&arena->lock);
 
 	if (zero == false) {
 		if (opt_junk)
-			memset(ret, 0xe4, size);
+			memset(ret, kAllocJunk, size);
 		else if (opt_zero)
 			memset(ret, 0, size);
 	}
 
 	return (ret);
 }
 
 static inline void *
@@ -3692,17 +3697,17 @@ arena_palloc(arena_t *arena, size_t alig
 		}
 	}
 
 	arena->stats.nmalloc_large++;
 	arena->stats.allocated_large += size;
 	malloc_spin_unlock(&arena->lock);
 
 	if (opt_junk)
-		memset(ret, 0xe4, size);
+		memset(ret, kAllocJunk, size);
 	else if (opt_zero)
 		memset(ret, 0, size);
 	return (ret);
 }
 
 static inline void *
 ipalloc(size_t alignment, size_t size)
 {
@@ -3909,17 +3914,17 @@ arena_dalloc_small(arena_t *arena, arena
 	size_t size;
 
 	run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
 	RELEASE_ASSERT(run->magic == ARENA_RUN_MAGIC);
 	bin = run->bin;
 	size = bin->reg_size;
 
 	if (opt_poison)
-		memset(ptr, 0xe5, size);
+		memset(ptr, kAllocPoison, size);
 
 	arena_run_reg_dalloc(run, bin, ptr, size);
 	run->nfree++;
 
 	if (run->nfree == bin->nregs) {
 		/* Deallocate run. */
 		if (run == bin->runcur)
 			bin->runcur = NULL;
@@ -3985,17 +3990,17 @@ arena_dalloc_small(arena_t *arena, arena
 static void
 arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 {
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
 	    pagesize_2pow;
 	size_t size = chunk->map[pageind].bits & ~pagesize_mask;
 
 	if (opt_poison)
-		memset(ptr, 0xe5, size);
+		memset(ptr, kAllocPoison, size);
 	arena->stats.allocated_large -= size;
 	arena->stats.ndalloc_large++;
 
 	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
 }
 
 static inline void
 arena_dalloc(void *ptr, size_t offset)
@@ -4106,32 +4111,32 @@ static bool
 arena_ralloc_large(void *ptr, size_t size, size_t oldsize)
 {
 	size_t psize;
 
 	psize = PAGE_CEILING(size);
 	if (psize == oldsize) {
 		/* Same size class. */
 		if (opt_poison && size < oldsize) {
-			memset((void *)((uintptr_t)ptr + size), 0xe5, oldsize -
+			memset((void *)((uintptr_t)ptr + size), kAllocPoison, oldsize -
 			    size);
 		}
 		return (false);
 	} else {
 		arena_chunk_t *chunk;
 		arena_t *arena;
 
 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 		arena = chunk->arena;
 		RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
 
 		if (psize < oldsize) {
 			/* Fill before shrinking in order to avoid a race. */
 			if (opt_poison) {
-				memset((void *)((uintptr_t)ptr + size), 0xe5,
+				memset((void *)((uintptr_t)ptr + size), kAllocPoison,
 				    oldsize - size);
 			}
 			arena_ralloc_large_shrink(arena, chunk, ptr, psize,
 			    oldsize);
 			return (false);
 		} else {
 			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
 			    psize, oldsize);
@@ -4187,17 +4192,17 @@ arena_ralloc(void *ptr, size_t size, siz
 		pages_copy(ret, ptr, copysize);
 	else
 #endif
 		memcpy(ret, ptr, copysize);
 	idalloc(ptr);
 	return (ret);
 IN_PLACE:
 	if (opt_poison && size < oldsize)
-		memset((void *)((uintptr_t)ptr + size), 0xe5, oldsize - size);
+		memset((void *)((uintptr_t)ptr + size), kAllocPoison, oldsize - size);
 	else if (opt_zero && size > oldsize)
 		memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
 	return (ptr);
 }
 
 static inline void *
 iralloc(void *ptr, size_t size)
 {
@@ -4430,19 +4435,19 @@ huge_palloc(size_t size, size_t alignmen
 #ifdef MALLOC_DECOMMIT
 	if (csize - psize > 0)
 		pages_decommit((void *)((uintptr_t)ret + psize), csize - psize);
 #endif
 
 	if (zero == false) {
 		if (opt_junk)
 #  ifdef MALLOC_DECOMMIT
-			memset(ret, 0xe4, psize);
+			memset(ret, kAllocJunk, psize);
 #  else
-			memset(ret, 0xe4, csize);
+			memset(ret, kAllocJunk, csize);
 #  endif
 		else if (opt_zero)
 #  ifdef MALLOC_DECOMMIT
 			memset(ret, 0, psize);
 #  else
 			memset(ret, 0, csize);
 #  endif
 	}
@@ -4457,17 +4462,17 @@ huge_ralloc(void *ptr, size_t size, size
 	size_t copysize;
 
 	/* Avoid moving the allocation if the size class would not change. */
 
 	if (oldsize > arena_maxclass &&
 	    CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
 		size_t psize = PAGE_CEILING(size);
 		if (opt_poison && size < oldsize) {
-			memset((void *)((uintptr_t)ptr + size), 0xe5, oldsize
+			memset((void *)((uintptr_t)ptr + size), kAllocPoison, oldsize
 			    - size);
 		}
 #ifdef MALLOC_DECOMMIT
 		if (psize < oldsize) {
 			extent_node_t *node, key;
 
 			pages_decommit((void *)((uintptr_t)ptr + psize),
 			    oldsize - psize);
--- a/memory/mozjemalloc/mozjemalloc_types.h
+++ b/memory/mozjemalloc/mozjemalloc_types.h
@@ -50,18 +50,18 @@ typedef unsigned char jemalloc_bool;
  * sure that the compiled results of jemalloc.c are in sync with this header
  * file.
  */
 typedef struct {
 	/*
 	 * Run-time configuration settings.
 	 */
 	jemalloc_bool	opt_abort;	/* abort(3) on error? */
-	jemalloc_bool	opt_junk;	/* Fill allocated memory with 0xe4? */
-	jemalloc_bool	opt_poison;	/* Fill free memory with 0xe5? */
+	jemalloc_bool	opt_junk;	/* Fill allocated memory with kAllocJunk? */
+	jemalloc_bool	opt_poison;	/* Fill free memory with kAllocPoison? */
 	jemalloc_bool	opt_sysv;	/* SysV semantics? */
 	jemalloc_bool	opt_xmalloc;	/* abort(3) on OOM? */
 	jemalloc_bool	opt_zero;	/* Fill allocated memory with 0x0? */
 	size_t	narenas;	/* Number of arenas. */
 	size_t	quantum;	/* Allocation quantum. */
 	size_t	small_max;	/* Max quantum-spaced allocation size. */
 	size_t	large_max;	/* Max sub-chunksize allocation size. */
 	size_t	chunksize;	/* Size of each virtual memory mapping. */