Bug 1229384 - In SplitRun, adjust the set of available runs after committing pages. r?njn
author: Mike Hommey <mh+mozilla@glandium.org>
date: Thu, 26 Oct 2017 18:12:04 +0900
changeset: 688089 c2e087a49f31b96ef61a69c82d3afa1356c8ac53
parent: 687287 1870cce57be80c2abd6153c792d4e66d8ff8466b
child: 688090 c4d70a07bb8ffd9511ae99767ddba18cf0c2d7e0
push id: 86668
push user: bmo:mh+mozilla@glandium.org
push date: Sat, 28 Oct 2017 09:51:45 +0000
reviewers: njn
bugs: 1229384
milestone: 58.0a1
Bug 1229384 - In SplitRun, adjust the set of available runs after committing pages. r?njn

On its own, this change is unnecessary, but it prepares for pages_commit becoming fallible in the next commit, allowing a possible early return while leaving the metadata in a consistent state: the runs are still available for future use even if they couldn't be committed immediately.
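The reordering is sketched below as a small standalone C++ snippet. This is not the actual mozjemalloc code: FallibleCommit, SplitRunSketch and the std::set-based availability set are made-up stand-ins for pages_commit, arena_t::SplitRun and mRunsAvail. The point it illustrates is the one from the commit message: do the fallible commit first, and only update the run-availability metadata once the commit has succeeded, so that an early return on failure leaves the run still registered as available.

// Minimal sketch only; names and types are hypothetical, not mozjemalloc's.
#include <cstddef>
#include <cstdio>
#include <set>

struct Run { size_t firstPage; };

// Stand-in for a fallible pages_commit: pretend the commit can fail.
static bool FallibleCommit(Run*, size_t /*pages*/, bool simulateFailure) {
  return !simulateFailure;
}

static bool SplitRunSketch(std::set<Run*>& avail, Run* run, size_t needPages,
                           size_t remPages, Run* trailing,
                           bool simulateFailure) {
  // Commit the needed pages *before* touching the availability metadata.
  if (!FallibleCommit(run, needPages, simulateFailure)) {
    // Early return on failure: `run` was never removed from `avail`,
    // so it is still recorded as available for a later allocation.
    return false;
  }
  // Only after a successful commit is the run carved out of the set.
  avail.erase(run);
  if (remPages > 0) {
    // Keep track of the trailing unused pages for later use.
    avail.insert(trailing);
  }
  return true;
}

int main() {
  Run a{0}, tail{4};
  std::set<Run*> avail{&a};
  // Simulated commit failure: the run stays in the availability set.
  bool ok = SplitRunSketch(avail, &a, 4, 2, &tail, /*simulateFailure=*/true);
  std::printf("ok=%d, run still available=%d\n", ok, (int)avail.count(&a));
  return 0;
}

With the pre-patch ordering, the equivalent sketch would have removed the run (and inserted the trailing pages) before the commit, so a failure mid-commit would have required undoing that bookkeeping before returning.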
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -2420,29 +2420,16 @@ arena_t::SplitRun(arena_run_t* aRun, siz
   old_ndirty = chunk->ndirty;
   run_ind = (unsigned)((uintptr_t(aRun) - uintptr_t(chunk)) >> pagesize_2pow);
   total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >> pagesize_2pow;
   need_pages = (aSize >> pagesize_2pow);
   MOZ_ASSERT(need_pages > 0);
   MOZ_ASSERT(need_pages <= total_pages);
   rem_pages = total_pages - need_pages;
 
-  mRunsAvail.Remove(&chunk->map[run_ind]);
-
-  /* Keep track of trailing unused pages for later use. */
-  if (rem_pages > 0) {
-    chunk->map[run_ind+need_pages].bits = (rem_pages <<
-        pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
-        pagesize_mask);
-    chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
-        pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
-        pagesize_mask);
-    mRunsAvail.Insert(&chunk->map[run_ind+need_pages]);
-  }
-
   for (i = 0; i < need_pages; i++) {
     /*
      * Commit decommitted pages if necessary.  If a decommitted
      * page is encountered, commit all needed adjacent decommitted
      * pages in one operation, in order to reduce system call
      * overhead.
      */
     if (chunk->map[run_ind + i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) {
@@ -2461,25 +2448,41 @@ arena_t::SplitRun(arena_run_t* aRun, siz
 
         chunk->map[run_ind + i + j].bits &=
             ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
       }
 
 #  ifdef MALLOC_DECOMMIT
       pages_commit((void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
                    j << pagesize_2pow);
+      // pages_commit zeroes pages, so mark them as such. That's checked
+      // further below to avoid manually zeroing the pages.
+      for (size_t k = 0; k < j; k++) {
+        chunk->map[run_ind + i + k].bits |= CHUNK_MAP_ZEROED;
+      }
 #  endif
 
       mStats.committed += j;
-
     }
-#  ifdef MALLOC_DECOMMIT
-    else /* No need to zero since commit zeroes. */
-#  endif
-
+  }
+
+  mRunsAvail.Remove(&chunk->map[run_ind]);
+
+  /* Keep track of trailing unused pages for later use. */
+  if (rem_pages > 0) {
+    chunk->map[run_ind+need_pages].bits = (rem_pages <<
+        pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
+        pagesize_mask);
+    chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
+        pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
+        pagesize_mask);
+    mRunsAvail.Insert(&chunk->map[run_ind+need_pages]);
+  }
+
+  for (i = 0; i < need_pages; i++) {
     /* Zero if necessary. */
     if (aZero) {
       if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED) == 0) {
         memset((void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
                0, pagesize);
         /* CHUNK_MAP_ZEROED is cleared below. */
       }
     }