Bug 1441714 - Respect the 80 line char style rule better in nsAtomTable.cpp. r=froydnj
author Nicholas Nethercote <nnethercote@mozilla.com>
Wed, 28 Feb 2018 10:57:18 +1100
changeset 760708 f8fe41aa7fd43e0b2d4a5d45d0d0c42dbf7cdcf5
parent 760707 a046b394e0f9c6c99062e2dbd20fb90db2568fdc
push id 100727
push user nnethercote@mozilla.com
push date Tue, 27 Feb 2018 23:57:57 +0000
reviewers froydnj
bugs 1441714
milestone 60.0a1
Bug 1441714 - Respect the 80 line char style rule better in nsAtomTable.cpp. r=froydnj This mostly affects comments; some are too long, some could be longer. MozReview-Commit-ID: HprR0sIDZwU
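The comments rewrapped below describe how the atom table is segmented into
lock-protected subtables selected by hash. As a minimal standalone sketch of
that technique, assuming simplified std:: containers rather than the real
PLDHashTable-based implementation (only the names SelectSubTable and
kNumSubTables come from the patched file; everything else is a stand-in):

// Illustrative sketch only, not part of this patch: a hash-segmented table
// with one mutex per subtable, so concurrent callers usually lock
// different subtables.
#include <array>
#include <cstddef>
#include <functional>
#include <mutex>
#include <string>
#include <unordered_map>

class SegmentedTable
{
  // Must be a power of two so the hash can be masked rather than divided.
  static const size_t kNumSubTables = 128;

  struct SubTable
  {
    std::mutex mLock;
    std::unordered_map<std::string, int> mTable;
  };
  std::array<SubTable, kNumSubTables> mSubTables;

  // With evenly distributed hashes, two concurrent callers pick the same
  // subtable (and thus contend on the same lock) with probability roughly
  // 1/kNumSubTables.
  SubTable& SelectSubTable(size_t aHash)
  {
    return mSubTables[aHash & (kNumSubTables - 1)];
  }

public:
  void Put(const std::string& aKey, int aValue)
  {
    size_t hash = std::hash<std::string>{}(aKey);
    SubTable& sub = SelectSubTable(hash);
    std::lock_guard<std::mutex> lock(sub.mLock); // Only this subtable locks.
    sub.mTable[aKey] = aValue;
  }
};

The tradeoff discussed in the comments below is that each extra subtable adds
a small fixed per-process cost, which is why 128 was chosen: per the
measurements in the patch, it keeps per-thread locking overhead under ~1ms
while costing only ~12K more than a single table.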
xpcom/ds/nsAtomTable.cpp
--- a/xpcom/ds/nsAtomTable.cpp
+++ b/xpcom/ds/nsAtomTable.cpp
@@ -204,17 +204,18 @@ struct AtomTableEntry : public PLDHashEn
 static nsAtom*
   sRecentlyUsedMainThreadAtoms[RECENTLY_USED_MAIN_THREAD_ATOM_CACHE_SIZE] = {};
 
 // In order to reduce locking contention for concurrent atomization, we segment
 // the atom table into N subtables, each with a separate lock. If the hash
 // values we use to select the subtable are evenly distributed, this reduces the
 // probability of contention by a factor of N. See bug 1440824.
 //
-// NB: This is somewhat similar to the technique used by Java's ConcurrentHashTable.
+// NB: This is somewhat similar to the technique used by Java's
+// ConcurrentHashMap.
 class nsAtomSubTable
 {
   friend class nsAtomTable;
   Mutex mLock;
   PLDHashTable mTable;
   nsAtomSubTable();
   void GCLocked(GCKind aKind);
   size_t SizeOfExcludingThisLocked(MallocSizeOf aMallocSizeOf);
@@ -248,40 +249,41 @@ public:
 
   // The result of this function may be imprecise if other threads are operating
   // on atoms concurrently. It's also slow, since it triggers a GC before
   // counting.
   size_t RacySlowCount();
 
   // This hash table op is a static member of this class so that it can take
   // advantage of |friend| declarations.
-  static void AtomTableClearEntry(PLDHashTable* aTable, PLDHashEntryHdr* aEntry);
+  static void AtomTableClearEntry(PLDHashTable* aTable,
+                                  PLDHashEntryHdr* aEntry);
 
   // We achieve measurable reduction in locking contention in parallel CSS
   // parsing by increasing the number of subtables up to 128. This has been
  // measured to have negligible impact on the performance of initialization, GC,
   // and shutdown.
   //
-  // Another important consideration is memory, since we're adding fixed overhead
-  // per content process, which we try to avoid. Measuring a mostly-empty page [1]
-  // with various numbers of subtables, we get the following deep sizes for the
-  // atom table:
+  // Another important consideration is memory, since we're adding fixed
+  // overhead per content process, which we try to avoid. Measuring a
+  // mostly-empty page [1] with various numbers of subtables, we get the
+  // following deep sizes for the atom table:
   //       1 subtable:  278K
   //       8 subtables: 279K
   //      16 subtables: 282K
   //      64 subtables: 286K
   //     128 subtables: 290K
   //
   // So 128 subtables costs us 12K relative to a single table, and 4K relative
   // to 64 subtables. Conversely, measuring parallel (6 thread) CSS parsing on
-  // tp6-facebook, a single table provides ~150ms of locking overhead per thread,
-  // 64 subtables provides ~2-3ms of overhead, and 128 subtables provides <1ms.
-  // And so while either 64 or 128 subtables would probably be acceptable,
-  // achieving a measurable reduction in contention for 4k of fixed memory
-  // overhead is probably worth it.
+  // tp6-facebook, a single table provides ~150ms of locking overhead per
+  // thread, 64 subtables provides ~2-3ms of overhead, and 128 subtables
+  // provides <1ms. And so while either 64 or 128 subtables would probably be
+  // acceptable, achieving a measurable reduction in contention for 4k of fixed
+  // memory overhead is probably worth it.
   //
   // [1] The numbers will look different for content processes with complex
   // pages loaded, but in those cases the actual atoms will dominate memory
   // usage and the overhead of extra tables will be negligible. We're mostly
   // interested in the fixed cost for nearly-empty content processes.
   const static size_t kNumSubTables = 128; // Must be power of two.
 
 private:
@@ -454,37 +456,36 @@ nsAtomSubTable::nsAtomSubTable()
 }
 
 void
 nsAtomSubTable::GCLocked(GCKind aKind)
 {
   MOZ_ASSERT(NS_IsMainThread());
   mLock.AssertCurrentThreadOwns();
 
-  int32_t removedCount = 0; // Use a non-atomic temporary for cheaper increments.
+  int32_t removedCount = 0; // A non-atomic temporary for cheaper increments.
   nsAutoCString nonZeroRefcountAtoms;
   uint32_t nonZeroRefcountAtomsCount = 0;
   for (auto i = mTable.Iter(); !i.Done(); i.Next()) {
     auto entry = static_cast<AtomTableEntry*>(i.Get());
     if (entry->mAtom->IsStaticAtom()) {
       continue;
     }
 
     nsAtom* atom = entry->mAtom;
     if (atom->mRefCnt == 0) {
       i.Remove();
       delete atom;
       ++removedCount;
     }
 #ifdef NS_FREE_PERMANENT_DATA
     else if (aKind == GCKind::Shutdown && PR_GetEnv("XPCOM_MEM_BLOAT_LOG")) {
-      // Only report leaking atoms in leak-checking builds in a run
-      // where we are checking for leaks, during shutdown. If
-      // something is anomalous, then we'll assert later in this
-      // function.
+      // Only report leaking atoms in leak-checking builds in a run where we
+      // are checking for leaks, during shutdown. If something is anomalous,
+      // then we'll assert later in this function.
       nsAutoCString name;
       atom->ToUTF8String(name);
       if (nonZeroRefcountAtomsCount == 0) {
         nonZeroRefcountAtoms = name;
       } else if (nonZeroRefcountAtomsCount < 20) {
         nonZeroRefcountAtoms += NS_LITERAL_CSTRING(",") + name;
       } else if (nonZeroRefcountAtomsCount == 20) {
         nonZeroRefcountAtoms += NS_LITERAL_CSTRING(",...");
@@ -646,20 +647,19 @@ nsAtomTable::RegisterStaticAtoms(const n
     uint32_t hash;
     AtomTableKey key(string, stringLen, &hash);
     nsAtomSubTable& table = SelectSubTable(key);
     MutexAutoLock lock(table.mLock);
     AtomTableEntry* he = table.Add(key);
 
     nsStaticAtom* atom;
     if (he->mAtom) {
-      // Disallow creating a dynamic atom, and then later, while the
-      // dynamic atom is still alive, registering that same atom as a
-      // static atom.  It causes subtle bugs, and we're programming in
-      // C++ here, not Smalltalk.
+      // Disallow creating a dynamic atom, and then later, while the dynamic
+      // atom is still alive, registering that same atom as a static atom.  It
+      // causes subtle bugs, and we're programming in C++ here, not Smalltalk.
       if (!he->mAtom->IsStaticAtom()) {
         nsAutoCString name;
         he->mAtom->ToUTF8String(name);
         MOZ_CRASH_UNSAFE_PRINTF(
           "Static atom registration for %s should be pushed back", name.get());
       }
       atom = static_cast<nsStaticAtom*>(he->mAtom);
     } else {