Bug 1437428 - Split ThreadInfo into three classes: ThreadInfo, RegisteredThread and ProfiledThreadData. r?njn draft
author Markus Stange <mstange@themasta.com>
Fri, 16 Feb 2018 22:43:51 -0500
changeset 756635 af6c4ba0c376b0d24c231ac9bf14985250913bf5
parent 756426 7566ff3df164ffd81173b446ac5b7bb39040df2f
child 756755 3189a947197e2cabbb56963a288ace0af4d042ef
push id 99509
push user bmo:mstange@themasta.com
push date Sat, 17 Feb 2018 03:44:37 +0000
reviewers njn
bugs 1437428
milestone 60.0a1
Bug 1437428 - Split ThreadInfo into three classes: ThreadInfo, RegisteredThread and ProfiledThreadData. r?njn

The file copies and renames make this patch look a bit confusing. Here's what happens:

ThreadInfo.h: Most of the code gets moved into RegisteredThread.h and ProfiledThreadData.h, but a small piece remains in ThreadInfo.h.
ThreadInfo.cpp: Gets split into RegisteredThread.cpp and ProfiledThreadData.cpp. ThreadInfo.cpp itself goes away.

In the mercurial changeset, I've marked ThreadInfo.h as being copied to both RegisteredThread.h and to ProfiledThreadData.h, and ThreadInfo.cpp as being copied to RegisteredThread.cpp and as being renamed to ProfiledThreadData.cpp.

MozReview-Commit-ID: 1j1imAv9cTd
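For orientation, here is a minimal sketch of how the responsibilities end up divided after this patch. It is illustrative only, not the actual Mozilla types: standard C++ (std::shared_ptr, std::optional, std::string) stands in for RefPtr, nsCOMPtr and mozilla::Maybe, and most members are omitted; the real definitions are in the diff below.

    // Illustrative sketch only; simplified stand-ins for the Mozilla types.
    #include <cstdint>
    #include <memory>
    #include <optional>
    #include <string>

    // Immutable per-thread data, kept across profiler restarts and usable
    // even after the thread has stopped running (threadsafe-refcounted in
    // the real patch).
    struct ThreadInfo {
      std::string mName;
      int mThreadId;
      bool mIsMainThread;
    };

    // Exists while the thread is alive and registered with the profiler,
    // regardless of whether the profiler is running. In the patch it also
    // owns the RacyRegisteredThread, the PlatformData and the JSContext.
    struct RegisteredThread {
      std::shared_ptr<ThreadInfo> mThreadInfo;
      const void* mStackTop = nullptr;
    };

    // Exists only while the profiler is running and the thread matches the
    // thread filter; may outlive the thread as long as its data is still in
    // the profiler buffer.
    struct ProfiledThreadData {
      std::shared_ptr<ThreadInfo> mThreadInfo;
      std::optional<uint64_t> mLastSample;
      std::optional<uint64_t> mBufferPositionWhenUnregistered;
    };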
tools/profiler/core/ProfiledThreadData.cpp
tools/profiler/core/ProfiledThreadData.h
tools/profiler/core/RegisteredThread.cpp
tools/profiler/core/RegisteredThread.h
tools/profiler/core/ThreadInfo.cpp
tools/profiler/core/ThreadInfo.h
tools/profiler/core/platform-linux-android.cpp
tools/profiler/core/platform-macos.cpp
tools/profiler/core/platform-win32.cpp
tools/profiler/core/platform.cpp
tools/profiler/moz.build
tools/profiler/tests/gtest/ThreadProfileTest.cpp
rename from tools/profiler/core/ThreadInfo.cpp
rename to tools/profiler/core/ProfiledThreadData.cpp
--- a/tools/profiler/core/ThreadInfo.cpp
+++ b/tools/profiler/core/ProfiledThreadData.cpp
@@ -1,86 +1,44 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include "ThreadInfo.h"
-
-#include "mozilla/DebugOnly.h"
+#include "ProfiledThreadData.h"
 
 #if defined(GP_OS_darwin)
 #include <pthread.h>
 #endif
 
 #ifdef XP_WIN
 #include <process.h>
 #define getpid _getpid
 #else
 #include <unistd.h> // for getpid()
 #endif
 
-ThreadInfo::ThreadInfo(const char* aName,
-                       int aThreadId,
-                       bool aIsMainThread,
-                       nsIEventTarget* aThread,
-                       void* aStackTop)
-  : mName(strdup(aName))
-  , mRegisterTime(TimeStamp::Now())
-  , mIsMainThread(aIsMainThread)
-  , mThread(aThread)
-  , mRacyInfo(mozilla::MakeNotNull<RacyThreadInfo*>(aThreadId))
-  , mPlatformData(AllocPlatformData(aThreadId))
-  , mStackTop(aStackTop)
-  , mIsBeingProfiled(false)
-  , mContext(nullptr)
-  , mJSSampling(INACTIVE)
-  , mLastSample()
+ProfiledThreadData::ProfiledThreadData(ThreadInfo* aThreadInfo,
+                                       nsIEventTarget* aEventTarget)
+  : mThreadInfo(aThreadInfo)
 {
-  MOZ_COUNT_CTOR(ThreadInfo);
-
-  // We don't have to guess on mac
-#if defined(GP_OS_darwin)
-  pthread_t self = pthread_self();
-  mStackTop = pthread_get_stackaddr_np(self);
-#endif
-
-  // I don't know if we can assert this. But we should warn.
-  MOZ_ASSERT(aThreadId >= 0, "native thread ID is < 0");
-  MOZ_ASSERT(aThreadId <= INT32_MAX, "native thread ID is > INT32_MAX");
+  MOZ_COUNT_CTOR(ProfiledThreadData);
+  mResponsiveness.emplace(aEventTarget, aThreadInfo->IsMainThread());
 }
 
-ThreadInfo::~ThreadInfo()
+ProfiledThreadData::~ProfiledThreadData()
 {
-  MOZ_COUNT_DTOR(ThreadInfo);
-
-  delete mRacyInfo;
+  MOZ_COUNT_DTOR(ProfiledThreadData);
 }
 
 void
-ThreadInfo::StartProfiling()
-{
-  mIsBeingProfiled = true;
-  mRacyInfo->ReinitializeOnResume();
-  mResponsiveness.emplace(mThread, mIsMainThread);
-}
-
-void
-ThreadInfo::StopProfiling()
-{
-  mResponsiveness.reset();
-  mPartialProfile = nullptr;
-  mIsBeingProfiled = false;
-}
-
-void
-ThreadInfo::StreamJSON(const ProfileBuffer& aBuffer,
-                       SpliceableJSONWriter& aWriter,
-                       const TimeStamp& aProcessStartTime, double aSinceTime)
+ProfiledThreadData::StreamJSON(const ProfileBuffer& aBuffer, JSContext* aCx,
+                               SpliceableJSONWriter& aWriter,
+                               const TimeStamp& aProcessStartTime, double aSinceTime)
 {
   UniquePtr<PartialThreadProfile> partialProfile = Move(mPartialProfile);
 
   UniquePtr<UniqueStacks> uniqueStacks = partialProfile
     ? Move(partialProfile->mUniqueStacks)
     : MakeUnique<UniqueStacks>();
 
   uniqueStacks->AdvanceStreamingGeneration();
@@ -89,20 +47,21 @@ ThreadInfo::StreamJSON(const ProfileBuff
   UniquePtr<char[]> partialMarkersJSON;
   if (partialProfile) {
     partialSamplesJSON = Move(partialProfile->mSamplesJSON);
     partialMarkersJSON = Move(partialProfile->mMarkersJSON);
   }
 
   aWriter.Start();
   {
-    StreamSamplesAndMarkers(Name(), ThreadId(), aBuffer, aWriter,
+    StreamSamplesAndMarkers(mThreadInfo->Name(), mThreadInfo->ThreadId(),
+                            aBuffer, aWriter,
                             aProcessStartTime,
-                            mRegisterTime, mUnregisterTime,
-                            aSinceTime, mContext,
+                            mThreadInfo->RegisterTime(), mUnregisterTime,
+                            aSinceTime, aCx,
                             Move(partialSamplesJSON),
                             Move(partialMarkersJSON),
                             *uniqueStacks);
 
     aWriter.StartObjectProperty("stackTable");
     {
       {
         JSONSchemaWriter schema(aWriter);
@@ -228,22 +187,23 @@ StreamSamplesAndMarkers(const char* aNam
                                   aSinceTime, aUniqueStacks);
     }
     aWriter.EndArray();
   }
   aWriter.EndObject();
 }
 
 void
-ThreadInfo::FlushSamplesAndMarkers(const TimeStamp& aProcessStartTime,
-                                   ProfileBuffer& aBuffer)
+ProfiledThreadData::FlushSamplesAndMarkers(JSContext* aCx,
+                                           const TimeStamp& aProcessStartTime,
+                                           ProfileBuffer& aBuffer)
 {
   // This function is used to serialize the current buffer just before
   // JSContext destruction.
-  MOZ_ASSERT(mContext);
+  MOZ_ASSERT(aCx);
 
   // Unlike StreamJSObject, do not surround the samples in brackets by calling
   // aWriter.{Start,End}BareList. The result string will be a comma-separated
   // list of JSON object literals that will prepended by StreamJSObject into
   // an existing array.
   //
   // Note that the UniqueStacks instance is persisted so that the frame-index
   // mapping is stable across JS shutdown.
@@ -265,18 +225,19 @@ ThreadInfo::FlushSamplesAndMarkers(const
         b.Splice(mPartialProfile->mSamplesJSON.get());
         haveSamples = true;
       }
 
       // We deliberately use a new variable instead of writing something like
       // `haveSamples || aBuffer.StreamSamplesToJSON(...)` because we don't want
       // to short-circuit the call.
       bool streamedNewSamples =
-        aBuffer.StreamSamplesToJSON(b, ThreadId(), /* aSinceTime = */ 0,
-                                    mContext, *uniqueStacks);
+        aBuffer.StreamSamplesToJSON(b, mThreadInfo->ThreadId(),
+                                    /* aSinceTime = */ 0,
+                                    aCx, *uniqueStacks);
       haveSamples = haveSamples || streamedNewSamples;
     }
     b.EndBareList();
 
     // https://bugzilla.mozilla.org/show_bug.cgi?id=1428076
     // If we don't have any data, keep samplesJSON set to null. That
     // way we won't try to splice it into the JSON later on, which would
     // result in an invalid JSON due to stray commas.
@@ -294,17 +255,18 @@ ThreadInfo::FlushSamplesAndMarkers(const
         b.Splice(mPartialProfile->mMarkersJSON.get());
         haveMarkers = true;
       }
 
       // We deliberately use a new variable instead of writing something like
       // `haveMarkers || aBuffer.StreamMarkersToJSON(...)` because we don't want
       // to short-circuit the call.
       bool streamedNewMarkers =
-        aBuffer.StreamMarkersToJSON(b, ThreadId(), aProcessStartTime,
+        aBuffer.StreamMarkersToJSON(b, mThreadInfo->ThreadId(),
+                                    aProcessStartTime,
                                     /* aSinceTime = */ 0, *uniqueStacks);
       haveMarkers = haveMarkers || streamedNewMarkers;
     }
     b.EndBareList();
 
     // https://bugzilla.mozilla.org/show_bug.cgi?id=1428076
     // If we don't have any data, keep markersJSON set to null. That
     // way we won't try to splice it into the JSON later on, which would
@@ -316,26 +278,8 @@ ThreadInfo::FlushSamplesAndMarkers(const
 
   mPartialProfile = MakeUnique<PartialThreadProfile>(
     Move(samplesJSON), Move(markersJSON), Move(uniqueStacks));
 
   // Reset the buffer. Attempting to symbolicate JS samples after mContext has
   // gone away will crash.
   aBuffer.Reset();
 }
-
-size_t
-ThreadInfo::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
-{
-  size_t n = aMallocSizeOf(this);
-  n += aMallocSizeOf(mName.get());
-  n += mRacyInfo->SizeOfIncludingThis(aMallocSizeOf);
-
-  // Measurement of the following members may be added later if DMD finds it
-  // is worthwhile:
-  // - mPlatformData
-  // - mPartialProfile
-  //
-  // The following members are not measured:
-  // - mThread: because it is non-owning
-
-  return n;
-}
copy from tools/profiler/core/ThreadInfo.h
copy to tools/profiler/core/ProfiledThreadData.h
--- a/tools/profiler/core/ThreadInfo.h
+++ b/tools/profiler/core/ProfiledThreadData.h
@@ -1,175 +1,25 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#ifndef ThreadInfo_h
-#define ThreadInfo_h
+#ifndef ProfiledThreadData_h
+#define ProfiledThreadData_h
 
 #include "mozilla/NotNull.h"
 #include "mozilla/TimeStamp.h"
 #include "mozilla/UniquePtrExtensions.h"
 
+#include "js/ProfilingStack.h"
 #include "platform.h"
 #include "ProfileBuffer.h"
-#include "js/ProfilingStack.h"
-
-// This class contains the info for a single thread that is accessible without
-// protection from gPSMutex in platform.cpp. Because there is no external
-// protection against data races, it must provide internal protection. Hence
-// the "Racy" prefix.
-//
-class RacyThreadInfo final
-{
-public:
-  explicit RacyThreadInfo(int aThreadId)
-    : mThreadId(aThreadId)
-    , mSleep(AWAKE)
-  {
-    MOZ_COUNT_CTOR(RacyThreadInfo);
-  }
-
-  ~RacyThreadInfo()
-  {
-    MOZ_COUNT_DTOR(RacyThreadInfo);
-  }
-
-  size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
-  {
-    size_t n = aMallocSizeOf(this);
-
-    // Measurement of the following members may be added later if DMD finds it
-    // is worthwhile:
-    // - things in the PseudoStack
-    // - mPendingMarkers
-    //
-    // If these measurements are added, the code must be careful to avoid data
-    // races. (The current code doesn't have any race issues because the
-    // contents of the PseudoStack object aren't accessed; |this| is used only
-    // as an address for lookup by aMallocSizeof).
-
-    return n;
-  }
-
-  void AddPendingMarker(const char* aMarkerName,
-                        mozilla::UniquePtr<ProfilerMarkerPayload> aPayload,
-                        double aTime)
-  {
-    ProfilerMarker* marker =
-      new ProfilerMarker(aMarkerName, mThreadId, Move(aPayload), aTime);
-    mPendingMarkers.insert(marker);
-  }
-
-  // Called within signal. Function must be reentrant.
-  ProfilerMarkerLinkedList* GetPendingMarkers()
-  {
-    // The profiled thread is interrupted, so we can access the list safely.
-    // Unless the profiled thread was in the middle of changing the list when
-    // we interrupted it - in that case, accessList() will return null.
-    return mPendingMarkers.accessList();
-  }
-
-  // This is called on every profiler restart. Put things that should happen at
-  // that time here.
-  void ReinitializeOnResume()
-  {
-    // This is needed to cause an initial sample to be taken from sleeping
-    // threads that had been observed prior to the profiler stopping and
-    // restarting. Otherwise sleeping threads would not have any samples to
-    // copy forward while sleeping.
-    (void)mSleep.compareExchange(SLEEPING_OBSERVED, SLEEPING_NOT_OBSERVED);
-  }
-
-  // This returns true for the second and subsequent calls in each sleep cycle.
-  bool CanDuplicateLastSampleDueToSleep()
-  {
-    if (mSleep == AWAKE) {
-      return false;
-    }
-
-    if (mSleep.compareExchange(SLEEPING_NOT_OBSERVED, SLEEPING_OBSERVED)) {
-      return false;
-    }
-
-    return true;
-  }
-
-  // Call this whenever the current thread sleeps. Calling it twice in a row
-  // without an intervening setAwake() call is an error.
-  void SetSleeping()
-  {
-    MOZ_ASSERT(mSleep == AWAKE);
-    mSleep = SLEEPING_NOT_OBSERVED;
-  }
-
-  // Call this whenever the current thread wakes. Calling it twice in a row
-  // without an intervening setSleeping() call is an error.
-  void SetAwake()
-  {
-    MOZ_ASSERT(mSleep != AWAKE);
-    mSleep = AWAKE;
-  }
-
-  bool IsSleeping() { return mSleep != AWAKE; }
-
-  int ThreadId() const { return mThreadId; }
-
-  class PseudoStack& PseudoStack() { return mPseudoStack; }
-
-private:
-  class PseudoStack mPseudoStack;
-
-  // A list of pending markers that must be moved to the circular buffer.
-  ProfilerSignalSafeLinkedList<ProfilerMarker> mPendingMarkers;
-
-  // mThreadId contains the thread ID of the current thread. It is safe to read
-  // this from multiple threads concurrently, as it will never be mutated.
-  const int mThreadId;
-
-  // mSleep tracks whether the thread is sleeping, and if so, whether it has
-  // been previously observed. This is used for an optimization: in some cases,
-  // when a thread is asleep, we duplicate the previous sample, which is
-  // cheaper than taking a new sample.
-  //
-  // mSleep is atomic because it is accessed from multiple threads.
-  //
-  // - It is written only by this thread, via setSleeping() and setAwake().
-  //
-  // - It is read by SamplerThread::Run().
-  //
-  // There are two cases where racing between threads can cause an issue.
-  //
-  // - If CanDuplicateLastSampleDueToSleep() returns false but that result is
-  //   invalidated before being acted upon, we will take a full sample
-  //   unnecessarily. This is additional work but won't cause any correctness
-  //   issues. (In actual fact, this case is impossible. In order to go from
-  //   CanDuplicateLastSampleDueToSleep() returning false to it returning true
-  //   requires an intermediate call to it in order for mSleep to go from
-  //   SLEEPING_NOT_OBSERVED to SLEEPING_OBSERVED.)
-  //
-  // - If CanDuplicateLastSampleDueToSleep() returns true but that result is
-  //   invalidated before being acted upon -- i.e. the thread wakes up before
-  //   DuplicateLastSample() is called -- we will duplicate the previous
-  //   sample. This is inaccurate, but only slightly... we will effectively
-  //   treat the thread as having slept a tiny bit longer than it really did.
-  //
-  // This latter inaccuracy could be avoided by moving the
-  // CanDuplicateLastSampleDueToSleep() check within the thread-freezing code,
-  // e.g. the section where Tick() is called. But that would reduce the
-  // effectiveness of the optimization because more code would have to be run
-  // before we can tell that duplication is allowed.
-  //
-  static const int AWAKE = 0;
-  static const int SLEEPING_NOT_OBSERVED = 1;
-  static const int SLEEPING_OBSERVED = 2;
-  mozilla::Atomic<int> mSleep;
-};
+#include "ThreadInfo.h"
 
 // Contains data for partial profiles that get saved when
 // ThreadInfo::FlushSamplesAndMarkers gets called.
 struct PartialThreadProfile final
 {
   PartialThreadProfile(mozilla::UniquePtr<char[]>&& aSamplesJSON,
                        mozilla::UniquePtr<char[]>&& aMarkersJSON,
                        mozilla::UniquePtr<UniqueStacks>&& aUniqueStacks)
@@ -178,246 +28,117 @@ struct PartialThreadProfile final
     , mUniqueStacks(mozilla::Move(aUniqueStacks))
   {}
 
   mozilla::UniquePtr<char[]> mSamplesJSON;
   mozilla::UniquePtr<char[]> mMarkersJSON;
   mozilla::UniquePtr<UniqueStacks> mUniqueStacks;
 };
 
-// This class contains the info for a single thread.
+// This class contains information about a thread that is only relevant while
+// the profiler is running, for any threads (both alive and dead) whose thread
+// name matches the "thread filter" in the current profiler run.
+// ProfiledThreadData objects may be kept alive even after the thread is
+// unregistered, as long as there is still data for that thread in the profiler
+// buffer.
+//
+// Accesses to this class are protected by the profiler state lock.
 //
-// Note: A thread's ThreadInfo can be held onto after the thread itself exits,
-// because we may need to output profiling information about that thread. But
-// some of the fields in this class are only relevant while the thread is
-// alive. It's possible that this class could be refactored so there is a
-// clearer split between those fields and the fields that are still relevant
-// after the thread exists.
-class ThreadInfo final
+// Created as soon as the following are true for the thread:
+//  - The profiler is running, and
+//  - the thread matches the profiler's thread filter, and
+//  - the thread is registered with the profiler.
+// So it gets created in response to either (1) the profiler being started (for
+// an existing registered thread) or (2) the thread being registered (if the
+// profiler is already running).
+//
+// The thread may be unregistered during the lifetime of ProfiledThreadData.
+// If that happens, NotifyUnregistered() is called.
+//
+// This class is the right place to store buffer positions. Profiler buffer
+// positions become invalid if the profiler buffer is destroyed, which happens
+// when the profiler is stopped.
+class ProfiledThreadData final
 {
 public:
-  ThreadInfo(const char* aName, int aThreadId, bool aIsMainThread,
-             nsIEventTarget* aThread, void* aStackTop);
-
-  ~ThreadInfo();
-
-  const char* Name() const { return mName.get(); }
-
-  // This is a safe read even when the target thread is not blocked, as this
-  // thread id is never mutated.
-  int ThreadId() const { return RacyInfo()->ThreadId(); }
-
-  bool IsMainThread() const { return mIsMainThread; }
-
-  mozilla::NotNull<RacyThreadInfo*> RacyInfo() const { return mRacyInfo; }
-
-  void StartProfiling();
-  void StopProfiling();
-  bool IsBeingProfiled() { return mIsBeingProfiled; }
+  ProfiledThreadData(ThreadInfo* aThreadInfo, nsIEventTarget* aEventTarget);
+  ~ProfiledThreadData();
 
   void NotifyUnregistered(uint64_t aBufferPosition)
   {
+    mResponsiveness.reset();
+    mLastSample = mozilla::Nothing();
     mUnregisterTime = TimeStamp::Now();
     mBufferPositionWhenUnregistered = mozilla::Some(aBufferPosition);
   }
   mozilla::Maybe<uint64_t> BufferPositionWhenUnregistered() { return mBufferPositionWhenUnregistered; }
 
-  PlatformData* GetPlatformData() const { return mPlatformData.get(); }
-  void* StackTop() const { return mStackTop; }
-
-  size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
-
   mozilla::Maybe<uint64_t>& LastSample() { return mLastSample; }
 
-private:
-  mozilla::UniqueFreePtr<char> mName;
-  mozilla::TimeStamp mRegisterTime;
-  mozilla::TimeStamp mUnregisterTime;
-  mozilla::Maybe<uint64_t> mBufferPositionWhenUnregistered;
-  const bool mIsMainThread;
-  nsCOMPtr<nsIEventTarget> mThread;
-
-  // The thread's RacyThreadInfo. This is an owning pointer. It could be an
-  // inline member, but we don't do that because RacyThreadInfo is quite large
-  // (due to the PseudoStack within it), and we have ThreadInfo vectors and so
-  // we'd end up wasting a lot of space in those vectors for excess elements.
-  mozilla::NotNull<RacyThreadInfo*> mRacyInfo;
-
-  UniquePlatformData mPlatformData;
-  void* mStackTop;
-
-  //
-  // The following code is only used for threads that are being profiled, i.e.
-  // for which IsBeingProfiled() returns true.
-  //
-
-public:
-  void StreamJSON(const ProfileBuffer& aBuffer, SpliceableJSONWriter& aWriter,
+  void StreamJSON(const ProfileBuffer& aBuffer, JSContext* aCx,
+                  SpliceableJSONWriter& aWriter,
                   const mozilla::TimeStamp& aProcessStartTime,
                   double aSinceTime);
 
   // Call this method when the JS entries inside the buffer are about to
   // become invalid, i.e., just before JS shutdown.
-  void FlushSamplesAndMarkers(const mozilla::TimeStamp& aProcessStartTime,
+  void FlushSamplesAndMarkers(JSContext* aCx,
+                              const mozilla::TimeStamp& aProcessStartTime,
                               ProfileBuffer& aBuffer);
 
   // Returns nullptr if this is not the main thread or if this thread is not
   // being profiled.
   ThreadResponsiveness* GetThreadResponsiveness()
   {
     ThreadResponsiveness* responsiveness = mResponsiveness.ptrOr(nullptr);
-    MOZ_ASSERT(!responsiveness || mIsBeingProfiled);
     return responsiveness;
   }
 
-  // Set the JSContext of the thread to be sampled. Sampling cannot begin until
-  // this has been set.
-  void SetJSContext(JSContext* aContext)
-  {
-    // This function runs on-thread.
-
-    MOZ_ASSERT(aContext && !mContext);
-
-    mContext = aContext;
-
-    // We give the JS engine a non-owning reference to the PseudoStack. It's
-    // important that the JS engine doesn't touch this once the thread dies.
-    js::SetContextProfilingStack(aContext, &RacyInfo()->PseudoStack());
-
-    PollJSSampling();
-  }
-
-  // Request that this thread start JS sampling. JS sampling won't actually
-  // start until a subsequent PollJSSampling() call occurs *and* mContext has
-  // been set.
-  void StartJSSampling()
-  {
-    // This function runs on-thread or off-thread.
-
-    MOZ_RELEASE_ASSERT(mJSSampling == INACTIVE ||
-                       mJSSampling == INACTIVE_REQUESTED);
-    mJSSampling = ACTIVE_REQUESTED;
-  }
-
-  // Request that this thread stop JS sampling. JS sampling won't actually stop
-  // until a subsequent PollJSSampling() call occurs.
-  void StopJSSampling()
-  {
-    // This function runs on-thread or off-thread.
-
-    MOZ_RELEASE_ASSERT(mJSSampling == ACTIVE ||
-                       mJSSampling == ACTIVE_REQUESTED);
-    mJSSampling = INACTIVE_REQUESTED;
-  }
-
-  // Poll to see if JS sampling should be started/stopped.
-  void PollJSSampling()
-  {
-    // This function runs on-thread.
-
-    // We can't start/stop profiling until we have the thread's JSContext.
-    if (mContext) {
-      // It is possible for mJSSampling to go through the following sequences.
-      //
-      // - INACTIVE, ACTIVE_REQUESTED, INACTIVE_REQUESTED, INACTIVE
-      //
-      // - ACTIVE, INACTIVE_REQUESTED, ACTIVE_REQUESTED, ACTIVE
-      //
-      // Therefore, the if and else branches here aren't always interleaved.
-      // This is ok because the JS engine can handle that.
-      //
-      if (mJSSampling == ACTIVE_REQUESTED) {
-        mJSSampling = ACTIVE;
-        js::EnableContextProfilingStack(mContext, true);
-        js::RegisterContextProfilingEventMarker(mContext, profiler_add_marker);
-
-      } else if (mJSSampling == INACTIVE_REQUESTED) {
-        mJSSampling = INACTIVE;
-        js::EnableContextProfilingStack(mContext, false);
-      }
-    }
-  }
+  const RefPtr<ThreadInfo> Info() const { return mThreadInfo; }
 
 private:
-  bool mIsBeingProfiled;
+  // Group A:
+  // The following fields are interesting for the entire lifetime of a
+  // ProfiledThreadData object.
+
+  // This thread's thread info.
+  const RefPtr<ThreadInfo> mThreadInfo;
 
   // JS frames in the buffer may require a live JSRuntime to stream (e.g.,
   // stringifying JIT frames). In the case of JSRuntime destruction,
   // FlushSamplesAndMarkers should be called to save them. These are spliced
   // into the final stream.
   UniquePtr<PartialThreadProfile> mPartialProfile;
 
-  // This is used only for nsIThreads.
-  mozilla::Maybe<ThreadResponsiveness> mResponsiveness;
-
-public:
-  // If this is a JS thread, this is its JSContext, which is required for any
-  // JS sampling.
-  JSContext* mContext;
+  // Group B:
+  // The following fields are only used while this thread is alive and
+  // registered. They become Nothing() once the thread is unregistered.
 
-private:
-  // The profiler needs to start and stop JS sampling of JS threads at various
-  // times. However, the JS engine can only do the required actions on the
-  // JS thread itself ("on-thread"), not from another thread ("off-thread").
-  // Therefore, we have the following two-step process.
-  //
-  // - The profiler requests (on-thread or off-thread) that the JS sampling be
-  //   started/stopped, by changing mJSSampling to the appropriate REQUESTED
-  //   state.
-  //
-  // - The relevant JS thread polls (on-thread) for changes to mJSSampling.
-  //   When it sees a REQUESTED state, it performs the appropriate actions to
-  //   actually start/stop JS sampling, and changes mJSSampling out of the
-  //   REQUESTED state.
-  //
-  // The state machine is as follows.
-  //
-  //             INACTIVE --> ACTIVE_REQUESTED
-  //                  ^       ^ |
-  //                  |     _/  |
-  //                  |   _/    |
-  //                  |  /      |
-  //                  | v       v
-  //   INACTIVE_REQUESTED <-- ACTIVE
-  //
-  // The polling is done in the following two ways.
-  //
-  // - Via the interrupt callback mechanism; the JS thread must call
-  //   profiler_js_interrupt_callback() from its own interrupt callback.
-  //   This is how sampling must be started/stopped for threads where the
-  //   request was made off-thread.
-  //
-  // - When {Start,Stop}JSSampling() is called on-thread, we can immediately
-  //   follow it with a PollJSSampling() call to avoid the delay between the
-  //   two steps. Likewise, setJSContext() calls PollJSSampling().
-  //
-  // One non-obvious thing about all this: these JS sampling requests are made
-  // on all threads, even non-JS threads. mContext needs to also be set (via
-  // setJSContext(), which can only happen for JS threads) for any JS sampling
-  // to actually happen.
-  //
-  enum {
-    INACTIVE = 0,
-    ACTIVE_REQUESTED = 1,
-    ACTIVE = 2,
-    INACTIVE_REQUESTED = 3,
-  } mJSSampling;
+  // A helper object that instruments nsIThreads to obtain responsiveness
+  // information about their event loop.
+  mozilla::Maybe<ThreadResponsiveness> mResponsiveness;
 
   // When sampling, this holds the position in ActivePS::mBuffer of the most
   // recent sample for this thread, or Nothing() if there is no sample for this
   // thread in the buffer.
   mozilla::Maybe<uint64_t> mLastSample;
+
+  // Group C:
+  // The following fields are only used once this thread has been unregistered.
+
+  mozilla::Maybe<uint64_t> mBufferPositionWhenUnregistered;
+  mozilla::TimeStamp mUnregisterTime;
 };
 
 void
 StreamSamplesAndMarkers(const char* aName, int aThreadId,
                         const ProfileBuffer& aBuffer,
                         SpliceableJSONWriter& aWriter,
                         const mozilla::TimeStamp& aProcessStartTime,
                         const TimeStamp& aRegisterTime,
                         const TimeStamp& aUnregisterTime,
                         double aSinceTime,
                         JSContext* aContext,
                         UniquePtr<char[]>&& aPartialSamplesJSON,
                         UniquePtr<char[]>&& aPartialMarkersJSON,
                         UniqueStacks& aUniqueStacks);
 
-#endif  // ThreadInfo_h
+#endif  // ProfiledThreadData_h
copy from tools/profiler/core/ThreadInfo.cpp
copy to tools/profiler/core/RegisteredThread.cpp
--- a/tools/profiler/core/ThreadInfo.cpp
+++ b/tools/profiler/core/RegisteredThread.cpp
@@ -1,341 +1,47 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include "ThreadInfo.h"
-
-#include "mozilla/DebugOnly.h"
-
-#if defined(GP_OS_darwin)
-#include <pthread.h>
-#endif
+#include "RegisteredThread.h"
 
-#ifdef XP_WIN
-#include <process.h>
-#define getpid _getpid
-#else
-#include <unistd.h> // for getpid()
-#endif
-
-ThreadInfo::ThreadInfo(const char* aName,
-                       int aThreadId,
-                       bool aIsMainThread,
-                       nsIEventTarget* aThread,
-                       void* aStackTop)
-  : mName(strdup(aName))
-  , mRegisterTime(TimeStamp::Now())
-  , mIsMainThread(aIsMainThread)
+RegisteredThread::RegisteredThread(ThreadInfo* aInfo, nsIEventTarget* aThread,
+                                   void* aStackTop)
+  : mRacyRegisteredThread(aInfo->ThreadId())
+  , mPlatformData(AllocPlatformData(aInfo->ThreadId()))
+  , mStackTop(aStackTop)
+  , mThreadInfo(aInfo)
   , mThread(aThread)
-  , mRacyInfo(mozilla::MakeNotNull<RacyThreadInfo*>(aThreadId))
-  , mPlatformData(AllocPlatformData(aThreadId))
-  , mStackTop(aStackTop)
-  , mIsBeingProfiled(false)
   , mContext(nullptr)
   , mJSSampling(INACTIVE)
-  , mLastSample()
 {
-  MOZ_COUNT_CTOR(ThreadInfo);
+  MOZ_COUNT_CTOR(RegisteredThread);
 
   // We don't have to guess on mac
 #if defined(GP_OS_darwin)
   pthread_t self = pthread_self();
   mStackTop = pthread_get_stackaddr_np(self);
 #endif
-
-  // I don't know if we can assert this. But we should warn.
-  MOZ_ASSERT(aThreadId >= 0, "native thread ID is < 0");
-  MOZ_ASSERT(aThreadId <= INT32_MAX, "native thread ID is > INT32_MAX");
-}
-
-ThreadInfo::~ThreadInfo()
-{
-  MOZ_COUNT_DTOR(ThreadInfo);
-
-  delete mRacyInfo;
-}
-
-void
-ThreadInfo::StartProfiling()
-{
-  mIsBeingProfiled = true;
-  mRacyInfo->ReinitializeOnResume();
-  mResponsiveness.emplace(mThread, mIsMainThread);
-}
-
-void
-ThreadInfo::StopProfiling()
-{
-  mResponsiveness.reset();
-  mPartialProfile = nullptr;
-  mIsBeingProfiled = false;
-}
-
-void
-ThreadInfo::StreamJSON(const ProfileBuffer& aBuffer,
-                       SpliceableJSONWriter& aWriter,
-                       const TimeStamp& aProcessStartTime, double aSinceTime)
-{
-  UniquePtr<PartialThreadProfile> partialProfile = Move(mPartialProfile);
-
-  UniquePtr<UniqueStacks> uniqueStacks = partialProfile
-    ? Move(partialProfile->mUniqueStacks)
-    : MakeUnique<UniqueStacks>();
-
-  uniqueStacks->AdvanceStreamingGeneration();
-
-  UniquePtr<char[]> partialSamplesJSON;
-  UniquePtr<char[]> partialMarkersJSON;
-  if (partialProfile) {
-    partialSamplesJSON = Move(partialProfile->mSamplesJSON);
-    partialMarkersJSON = Move(partialProfile->mMarkersJSON);
-  }
-
-  aWriter.Start();
-  {
-    StreamSamplesAndMarkers(Name(), ThreadId(), aBuffer, aWriter,
-                            aProcessStartTime,
-                            mRegisterTime, mUnregisterTime,
-                            aSinceTime, mContext,
-                            Move(partialSamplesJSON),
-                            Move(partialMarkersJSON),
-                            *uniqueStacks);
-
-    aWriter.StartObjectProperty("stackTable");
-    {
-      {
-        JSONSchemaWriter schema(aWriter);
-        schema.WriteField("prefix");
-        schema.WriteField("frame");
-      }
-
-      aWriter.StartArrayProperty("data");
-      {
-        uniqueStacks->SpliceStackTableElements(aWriter);
-      }
-      aWriter.EndArray();
-    }
-    aWriter.EndObject();
-
-    aWriter.StartObjectProperty("frameTable");
-    {
-      {
-        JSONSchemaWriter schema(aWriter);
-        schema.WriteField("location");
-        schema.WriteField("implementation");
-        schema.WriteField("optimizations");
-        schema.WriteField("line");
-        schema.WriteField("category");
-      }
-
-      aWriter.StartArrayProperty("data");
-      {
-        uniqueStacks->SpliceFrameTableElements(aWriter);
-      }
-      aWriter.EndArray();
-    }
-    aWriter.EndObject();
-
-    aWriter.StartArrayProperty("stringTable");
-    {
-      uniqueStacks->mUniqueStrings.SpliceStringTableElements(aWriter);
-    }
-    aWriter.EndArray();
-  }
-
-  aWriter.End();
 }
 
-void
-StreamSamplesAndMarkers(const char* aName,
-                        int aThreadId,
-                        const ProfileBuffer& aBuffer,
-                        SpliceableJSONWriter& aWriter,
-                        const TimeStamp& aProcessStartTime,
-                        const TimeStamp& aRegisterTime,
-                        const TimeStamp& aUnregisterTime,
-                        double aSinceTime,
-                        JSContext* aContext,
-                        UniquePtr<char[]>&& aPartialSamplesJSON,
-                        UniquePtr<char[]>&& aPartialMarkersJSON,
-                        UniqueStacks& aUniqueStacks)
+RegisteredThread::~RegisteredThread()
 {
-  aWriter.StringProperty("processType",
-                         XRE_ChildProcessTypeToString(XRE_GetProcessType()));
-
-  aWriter.StringProperty("name", aName);
-  aWriter.IntProperty("tid", static_cast<int64_t>(aThreadId));
-  aWriter.IntProperty("pid", static_cast<int64_t>(getpid()));
-
-  if (aRegisterTime) {
-    aWriter.DoubleProperty("registerTime",
-      (aRegisterTime - aProcessStartTime).ToMilliseconds());
-  } else {
-    aWriter.NullProperty("registerTime");
-  }
-
-  if (aUnregisterTime) {
-    aWriter.DoubleProperty("unregisterTime",
-      (aUnregisterTime - aProcessStartTime).ToMilliseconds());
-  } else {
-    aWriter.NullProperty("unregisterTime");
-  }
-
-  aWriter.StartObjectProperty("samples");
-  {
-    {
-      JSONSchemaWriter schema(aWriter);
-      schema.WriteField("stack");
-      schema.WriteField("time");
-      schema.WriteField("responsiveness");
-      schema.WriteField("rss");
-      schema.WriteField("uss");
-    }
-
-    aWriter.StartArrayProperty("data");
-    {
-      if (aPartialSamplesJSON) {
-        // We would only have saved streamed samples during shutdown
-        // streaming, which cares about dumping the entire buffer, and thus
-        // should have passed in 0 for aSinceTime.
-        MOZ_ASSERT(aSinceTime == 0);
-        aWriter.Splice(aPartialSamplesJSON.get());
-      }
-      aBuffer.StreamSamplesToJSON(aWriter, aThreadId, aSinceTime,
-                                  aContext, aUniqueStacks);
-    }
-    aWriter.EndArray();
-  }
-  aWriter.EndObject();
-
-  aWriter.StartObjectProperty("markers");
-  {
-    {
-      JSONSchemaWriter schema(aWriter);
-      schema.WriteField("name");
-      schema.WriteField("time");
-      schema.WriteField("data");
-    }
-
-    aWriter.StartArrayProperty("data");
-    {
-      if (aPartialMarkersJSON) {
-        MOZ_ASSERT(aSinceTime == 0);
-        aWriter.Splice(aPartialMarkersJSON.get());
-      }
-      aBuffer.StreamMarkersToJSON(aWriter, aThreadId, aProcessStartTime,
-                                  aSinceTime, aUniqueStacks);
-    }
-    aWriter.EndArray();
-  }
-  aWriter.EndObject();
-}
-
-void
-ThreadInfo::FlushSamplesAndMarkers(const TimeStamp& aProcessStartTime,
-                                   ProfileBuffer& aBuffer)
-{
-  // This function is used to serialize the current buffer just before
-  // JSContext destruction.
-  MOZ_ASSERT(mContext);
-
-  // Unlike StreamJSObject, do not surround the samples in brackets by calling
-  // aWriter.{Start,End}BareList. The result string will be a comma-separated
-  // list of JSON object literals that will prepended by StreamJSObject into
-  // an existing array.
-  //
-  // Note that the UniqueStacks instance is persisted so that the frame-index
-  // mapping is stable across JS shutdown.
-  UniquePtr<UniqueStacks> uniqueStacks = mPartialProfile
-    ? Move(mPartialProfile->mUniqueStacks)
-    : MakeUnique<UniqueStacks>();
-
-  uniqueStacks->AdvanceStreamingGeneration();
-
-  UniquePtr<char[]> samplesJSON;
-  UniquePtr<char[]> markersJSON;
-
-  {
-    SpliceableChunkedJSONWriter b;
-    b.StartBareList();
-    bool haveSamples = false;
-    {
-      if (mPartialProfile && mPartialProfile->mSamplesJSON) {
-        b.Splice(mPartialProfile->mSamplesJSON.get());
-        haveSamples = true;
-      }
-
-      // We deliberately use a new variable instead of writing something like
-      // `haveSamples || aBuffer.StreamSamplesToJSON(...)` because we don't want
-      // to short-circuit the call.
-      bool streamedNewSamples =
-        aBuffer.StreamSamplesToJSON(b, ThreadId(), /* aSinceTime = */ 0,
-                                    mContext, *uniqueStacks);
-      haveSamples = haveSamples || streamedNewSamples;
-    }
-    b.EndBareList();
-
-    // https://bugzilla.mozilla.org/show_bug.cgi?id=1428076
-    // If we don't have any data, keep samplesJSON set to null. That
-    // way we won't try to splice it into the JSON later on, which would
-    // result in an invalid JSON due to stray commas.
-    if (haveSamples) {
-      samplesJSON = b.WriteFunc()->CopyData();
-    }
-  }
-
-  {
-    SpliceableChunkedJSONWriter b;
-    b.StartBareList();
-    bool haveMarkers = false;
-    {
-      if (mPartialProfile && mPartialProfile->mMarkersJSON) {
-        b.Splice(mPartialProfile->mMarkersJSON.get());
-        haveMarkers = true;
-      }
-
-      // We deliberately use a new variable instead of writing something like
-      // `haveMarkers || aBuffer.StreamMarkersToJSON(...)` because we don't want
-      // to short-circuit the call.
-      bool streamedNewMarkers =
-        aBuffer.StreamMarkersToJSON(b, ThreadId(), aProcessStartTime,
-                                    /* aSinceTime = */ 0, *uniqueStacks);
-      haveMarkers = haveMarkers || streamedNewMarkers;
-    }
-    b.EndBareList();
-
-    // https://bugzilla.mozilla.org/show_bug.cgi?id=1428076
-    // If we don't have any data, keep markersJSON set to null. That
-    // way we won't try to splice it into the JSON later on, which would
-    // result in an invalid JSON due to stray commas.
-    if (haveMarkers) {
-      markersJSON = b.WriteFunc()->CopyData();
-    }
-  }
-
-  mPartialProfile = MakeUnique<PartialThreadProfile>(
-    Move(samplesJSON), Move(markersJSON), Move(uniqueStacks));
-
-  // Reset the buffer. Attempting to symbolicate JS samples after mContext has
-  // gone away will crash.
-  aBuffer.Reset();
+  MOZ_COUNT_DTOR(RegisteredThread);
 }
 
 size_t
-ThreadInfo::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
+RegisteredThread::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
 {
   size_t n = aMallocSizeOf(this);
-  n += aMallocSizeOf(mName.get());
-  n += mRacyInfo->SizeOfIncludingThis(aMallocSizeOf);
 
   // Measurement of the following members may be added later if DMD finds it
   // is worthwhile:
   // - mPlatformData
-  // - mPartialProfile
+  // - mRacyRegisteredThread.mPendingMarkers
   //
   // The following members are not measured:
-  // - mThread: because it is non-owning
+  // - mThreadInfo: because it is non-owning
 
   return n;
 }
copy from tools/profiler/core/ThreadInfo.h
copy to tools/profiler/core/RegisteredThread.h
--- a/tools/profiler/core/ThreadInfo.h
+++ b/tools/profiler/core/RegisteredThread.h
@@ -1,60 +1,40 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#ifndef ThreadInfo_h
-#define ThreadInfo_h
+#ifndef RegisteredThread_h
+#define RegisteredThread_h
 
-#include "mozilla/NotNull.h"
-#include "mozilla/TimeStamp.h"
 #include "mozilla/UniquePtrExtensions.h"
 
 #include "platform.h"
-#include "ProfileBuffer.h"
-#include "js/ProfilingStack.h"
+#include "ThreadInfo.h"
 
-// This class contains the info for a single thread that is accessible without
+// This class contains the state for a single thread that is accessible without
 // protection from gPSMutex in platform.cpp. Because there is no external
 // protection against data races, it must provide internal protection. Hence
 // the "Racy" prefix.
 //
-class RacyThreadInfo final
+class RacyRegisteredThread final
 {
 public:
-  explicit RacyThreadInfo(int aThreadId)
+  explicit RacyRegisteredThread(int aThreadId)
     : mThreadId(aThreadId)
     , mSleep(AWAKE)
   {
-    MOZ_COUNT_CTOR(RacyThreadInfo);
-  }
-
-  ~RacyThreadInfo()
-  {
-    MOZ_COUNT_DTOR(RacyThreadInfo);
+    MOZ_COUNT_CTOR(RacyRegisteredThread);
   }
 
-  size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
+  ~RacyRegisteredThread()
   {
-    size_t n = aMallocSizeOf(this);
-
-    // Measurement of the following members may be added later if DMD finds it
-    // is worthwhile:
-    // - things in the PseudoStack
-    // - mPendingMarkers
-    //
-    // If these measurements are added, the code must be careful to avoid data
-    // races. (The current code doesn't have any race issues because the
-    // contents of the PseudoStack object aren't accessed; |this| is used only
-    // as an address for lookup by aMallocSizeof).
-
-    return n;
+    MOZ_COUNT_DTOR(RacyRegisteredThread);
   }
 
   void AddPendingMarker(const char* aMarkerName,
                         mozilla::UniquePtr<ProfilerMarkerPayload> aPayload,
                         double aTime)
   {
     ProfilerMarker* marker =
       new ProfilerMarker(aMarkerName, mThreadId, Move(aPayload), aTime);
@@ -111,16 +91,17 @@ public:
     mSleep = AWAKE;
   }
 
   bool IsSleeping() { return mSleep != AWAKE; }
 
   int ThreadId() const { return mThreadId; }
 
   class PseudoStack& PseudoStack() { return mPseudoStack; }
+  const class PseudoStack& PseudoStack() const { return mPseudoStack; }
 
 private:
   class PseudoStack mPseudoStack;
 
   // A list of pending markers that must be moved to the circular buffer.
   ProfilerSignalSafeLinkedList<ProfilerMarker> mPendingMarkers;
 
   // mThreadId contains the thread ID of the current thread. It is safe to read
@@ -161,135 +142,63 @@ private:
   // before we can tell that duplication is allowed.
   //
   static const int AWAKE = 0;
   static const int SLEEPING_NOT_OBSERVED = 1;
   static const int SLEEPING_OBSERVED = 2;
   mozilla::Atomic<int> mSleep;
 };
 
-// Contains data for partial profiles that get saved when
-// ThreadInfo::FlushSamplesAndMarkers gets called.
-struct PartialThreadProfile final
-{
-  PartialThreadProfile(mozilla::UniquePtr<char[]>&& aSamplesJSON,
-                       mozilla::UniquePtr<char[]>&& aMarkersJSON,
-                       mozilla::UniquePtr<UniqueStacks>&& aUniqueStacks)
-    : mSamplesJSON(mozilla::Move(aSamplesJSON))
-    , mMarkersJSON(mozilla::Move(aMarkersJSON))
-    , mUniqueStacks(mozilla::Move(aUniqueStacks))
-  {}
-
-  mozilla::UniquePtr<char[]> mSamplesJSON;
-  mozilla::UniquePtr<char[]> mMarkersJSON;
-  mozilla::UniquePtr<UniqueStacks> mUniqueStacks;
-};
-
-// This class contains the info for a single thread.
-//
-// Note: A thread's ThreadInfo can be held onto after the thread itself exits,
-// because we may need to output profiling information about that thread. But
-// some of the fields in this class are only relevant while the thread is
-// alive. It's possible that this class could be refactored so there is a
-// clearer split between those fields and the fields that are still relevant
-// after the thread exists.
-class ThreadInfo final
+// This class contains information that's relevant to a single thread only
+// while that thread is running and registered with the profiler, but
+// regardless of whether the profiler is running. All accesses to it are
+// protected by the profiler state lock.
+class RegisteredThread final
 {
 public:
-  ThreadInfo(const char* aName, int aThreadId, bool aIsMainThread,
-             nsIEventTarget* aThread, void* aStackTop);
-
-  ~ThreadInfo();
-
-  const char* Name() const { return mName.get(); }
-
-  // This is a safe read even when the target thread is not blocked, as this
-  // thread id is never mutated.
-  int ThreadId() const { return RacyInfo()->ThreadId(); }
-
-  bool IsMainThread() const { return mIsMainThread; }
+  RegisteredThread(ThreadInfo* aInfo, nsIEventTarget* aThread,
+                   void* aStackTop);
+  ~RegisteredThread();
 
-  mozilla::NotNull<RacyThreadInfo*> RacyInfo() const { return mRacyInfo; }
-
-  void StartProfiling();
-  void StopProfiling();
-  bool IsBeingProfiled() { return mIsBeingProfiled; }
-
-  void NotifyUnregistered(uint64_t aBufferPosition)
-  {
-    mUnregisterTime = TimeStamp::Now();
-    mBufferPositionWhenUnregistered = mozilla::Some(aBufferPosition);
-  }
-  mozilla::Maybe<uint64_t> BufferPositionWhenUnregistered() { return mBufferPositionWhenUnregistered; }
+  class RacyRegisteredThread& RacyRegisteredThread() { return mRacyRegisteredThread; }
+  const class RacyRegisteredThread& RacyRegisteredThread() const { return mRacyRegisteredThread; }
 
   PlatformData* GetPlatformData() const { return mPlatformData.get(); }
-  void* StackTop() const { return mStackTop; }
+  const void* StackTop() const { return mStackTop; }
 
   size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
 
-  mozilla::Maybe<uint64_t>& LastSample() { return mLastSample; }
-
-private:
-  mozilla::UniqueFreePtr<char> mName;
-  mozilla::TimeStamp mRegisterTime;
-  mozilla::TimeStamp mUnregisterTime;
-  mozilla::Maybe<uint64_t> mBufferPositionWhenUnregistered;
-  const bool mIsMainThread;
-  nsCOMPtr<nsIEventTarget> mThread;
-
-  // The thread's RacyThreadInfo. This is an owning pointer. It could be an
-  // inline member, but we don't do that because RacyThreadInfo is quite large
-  // (due to the PseudoStack within it), and we have ThreadInfo vectors and so
-  // we'd end up wasting a lot of space in those vectors for excess elements.
-  mozilla::NotNull<RacyThreadInfo*> mRacyInfo;
-
-  UniquePlatformData mPlatformData;
-  void* mStackTop;
-
-  //
-  // The following code is only used for threads that are being profiled, i.e.
-  // for which IsBeingProfiled() returns true.
-  //
-
-public:
-  void StreamJSON(const ProfileBuffer& aBuffer, SpliceableJSONWriter& aWriter,
-                  const mozilla::TimeStamp& aProcessStartTime,
-                  double aSinceTime);
-
-  // Call this method when the JS entries inside the buffer are about to
-  // become invalid, i.e., just before JS shutdown.
-  void FlushSamplesAndMarkers(const mozilla::TimeStamp& aProcessStartTime,
-                              ProfileBuffer& aBuffer);
-
-  // Returns nullptr if this is not the main thread or if this thread is not
-  // being profiled.
-  ThreadResponsiveness* GetThreadResponsiveness()
-  {
-    ThreadResponsiveness* responsiveness = mResponsiveness.ptrOr(nullptr);
-    MOZ_ASSERT(!responsiveness || mIsBeingProfiled);
-    return responsiveness;
-  }
-
   // Set the JSContext of the thread to be sampled. Sampling cannot begin until
   // this has been set.
   void SetJSContext(JSContext* aContext)
   {
     // This function runs on-thread.
 
     MOZ_ASSERT(aContext && !mContext);
 
     mContext = aContext;
 
     // We give the JS engine a non-owning reference to the PseudoStack. It's
     // important that the JS engine doesn't touch this once the thread dies.
-    js::SetContextProfilingStack(aContext, &RacyInfo()->PseudoStack());
+    js::SetContextProfilingStack(aContext, &RacyRegisteredThread().PseudoStack());
 
     PollJSSampling();
   }
 
+  void ClearJSContext()
+  {
+    // This function runs on-thread.
+    mContext = nullptr;
+  }
+
+  JSContext* GetJSContext() const { return mContext; }
+
+  const RefPtr<ThreadInfo> Info() const { return mThreadInfo; }
+  const nsCOMPtr<nsIEventTarget> GetEventTarget() const { return mThread; }
+
   // Request that this thread start JS sampling. JS sampling won't actually
   // start until a subsequent PollJSSampling() call occurs *and* mContext has
   // been set.
   void StartJSSampling()
   {
     // This function runs on-thread or off-thread.
 
     MOZ_RELEASE_ASSERT(mJSSampling == INACTIVE ||
@@ -332,33 +241,28 @@ public:
       } else if (mJSSampling == INACTIVE_REQUESTED) {
         mJSSampling = INACTIVE;
         js::EnableContextProfilingStack(mContext, false);
       }
     }
   }
 
 private:
-  bool mIsBeingProfiled;
+  class RacyRegisteredThread mRacyRegisteredThread;
 
-  // JS frames in the buffer may require a live JSRuntime to stream (e.g.,
-  // stringifying JIT frames). In the case of JSRuntime destruction,
-  // FlushSamplesAndMarkers should be called to save them. These are spliced
-  // into the final stream.
-  UniquePtr<PartialThreadProfile> mPartialProfile;
+  const UniquePlatformData mPlatformData;
+  const void* mStackTop;
 
-  // This is used only for nsIThreads.
-  mozilla::Maybe<ThreadResponsiveness> mResponsiveness;
+  const RefPtr<ThreadInfo> mThreadInfo;
+  const nsCOMPtr<nsIEventTarget> mThread;
 
-public:
   // If this is a JS thread, this is its JSContext, which is required for any
   // JS sampling.
   JSContext* mContext;
 
-private:
   // The profiler needs to start and stop JS sampling of JS threads at various
   // times. However, the JS engine can only do the required actions on the
   // JS thread itself ("on-thread"), not from another thread ("off-thread").
   // Therefore, we have the following two-step process.
   //
   // - The profiler requests (on-thread or off-thread) that the JS sampling be
   //   started/stopped, by changing mJSSampling to the appropriate REQUESTED
   //   state.
@@ -395,29 +299,11 @@ private:
   // to actually happen.
   //
   enum {
     INACTIVE = 0,
     ACTIVE_REQUESTED = 1,
     ACTIVE = 2,
     INACTIVE_REQUESTED = 3,
   } mJSSampling;
-
-  // When sampling, this holds the position in ActivePS::mBuffer of the most
-  // recent sample for this thread, or Nothing() if there is no sample for this
-  // thread in the buffer.
-  mozilla::Maybe<uint64_t> mLastSample;
 };
 
-void
-StreamSamplesAndMarkers(const char* aName, int aThreadId,
-                        const ProfileBuffer& aBuffer,
-                        SpliceableJSONWriter& aWriter,
-                        const mozilla::TimeStamp& aProcessStartTime,
-                        const TimeStamp& aRegisterTime,
-                        const TimeStamp& aUnregisterTime,
-                        double aSinceTime,
-                        JSContext* aContext,
-                        UniquePtr<char[]>&& aPartialSamplesJSON,
-                        UniquePtr<char[]>&& aPartialMarkersJSON,
-                        UniqueStacks& aUniqueStacks);
-
-#endif  // ThreadInfo_h
+#endif  // RegisteredThread_h
--- a/tools/profiler/core/ThreadInfo.h
+++ b/tools/profiler/core/ThreadInfo.h
@@ -2,422 +2,47 @@
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef ThreadInfo_h
 #define ThreadInfo_h
 
-#include "mozilla/NotNull.h"
 #include "mozilla/TimeStamp.h"
-#include "mozilla/UniquePtrExtensions.h"
-
-#include "platform.h"
-#include "ProfileBuffer.h"
-#include "js/ProfilingStack.h"
-
-// This class contains the info for a single thread that is accessible without
-// protection from gPSMutex in platform.cpp. Because there is no external
-// protection against data races, it must provide internal protection. Hence
-// the "Racy" prefix.
-//
-class RacyThreadInfo final
-{
-public:
-  explicit RacyThreadInfo(int aThreadId)
-    : mThreadId(aThreadId)
-    , mSleep(AWAKE)
-  {
-    MOZ_COUNT_CTOR(RacyThreadInfo);
-  }
-
-  ~RacyThreadInfo()
-  {
-    MOZ_COUNT_DTOR(RacyThreadInfo);
-  }
-
-  size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
-  {
-    size_t n = aMallocSizeOf(this);
-
-    // Measurement of the following members may be added later if DMD finds it
-    // is worthwhile:
-    // - things in the PseudoStack
-    // - mPendingMarkers
-    //
-    // If these measurements are added, the code must be careful to avoid data
-    // races. (The current code doesn't have any race issues because the
-    // contents of the PseudoStack object aren't accessed; |this| is used only
-    // as an address for lookup by aMallocSizeof).
 
-    return n;
-  }
-
-  void AddPendingMarker(const char* aMarkerName,
-                        mozilla::UniquePtr<ProfilerMarkerPayload> aPayload,
-                        double aTime)
-  {
-    ProfilerMarker* marker =
-      new ProfilerMarker(aMarkerName, mThreadId, Move(aPayload), aTime);
-    mPendingMarkers.insert(marker);
-  }
-
-  // Called within signal. Function must be reentrant.
-  ProfilerMarkerLinkedList* GetPendingMarkers()
-  {
-    // The profiled thread is interrupted, so we can access the list safely.
-    // Unless the profiled thread was in the middle of changing the list when
-    // we interrupted it - in that case, accessList() will return null.
-    return mPendingMarkers.accessList();
-  }
-
-  // This is called on every profiler restart. Put things that should happen at
-  // that time here.
-  void ReinitializeOnResume()
-  {
-    // This is needed to cause an initial sample to be taken from sleeping
-    // threads that had been observed prior to the profiler stopping and
-    // restarting. Otherwise sleeping threads would not have any samples to
-    // copy forward while sleeping.
-    (void)mSleep.compareExchange(SLEEPING_OBSERVED, SLEEPING_NOT_OBSERVED);
-  }
-
-  // This returns true for the second and subsequent calls in each sleep cycle.
-  bool CanDuplicateLastSampleDueToSleep()
-  {
-    if (mSleep == AWAKE) {
-      return false;
-    }
-
-    if (mSleep.compareExchange(SLEEPING_NOT_OBSERVED, SLEEPING_OBSERVED)) {
-      return false;
-    }
-
-    return true;
-  }
+#include "nsString.h"
 
-  // Call this whenever the current thread sleeps. Calling it twice in a row
-  // without an intervening setAwake() call is an error.
-  void SetSleeping()
-  {
-    MOZ_ASSERT(mSleep == AWAKE);
-    mSleep = SLEEPING_NOT_OBSERVED;
-  }
-
-  // Call this whenever the current thread wakes. Calling it twice in a row
-  // without an intervening setSleeping() call is an error.
-  void SetAwake()
-  {
-    MOZ_ASSERT(mSleep != AWAKE);
-    mSleep = AWAKE;
-  }
-
-  bool IsSleeping() { return mSleep != AWAKE; }
-
-  int ThreadId() const { return mThreadId; }
-
-  class PseudoStack& PseudoStack() { return mPseudoStack; }
-
-private:
-  class PseudoStack mPseudoStack;
-
-  // A list of pending markers that must be moved to the circular buffer.
-  ProfilerSignalSafeLinkedList<ProfilerMarker> mPendingMarkers;
-
-  // mThreadId contains the thread ID of the current thread. It is safe to read
-  // this from multiple threads concurrently, as it will never be mutated.
-  const int mThreadId;
-
-  // mSleep tracks whether the thread is sleeping, and if so, whether it has
-  // been previously observed. This is used for an optimization: in some cases,
-  // when a thread is asleep, we duplicate the previous sample, which is
-  // cheaper than taking a new sample.
-  //
-  // mSleep is atomic because it is accessed from multiple threads.
-  //
-  // - It is written only by this thread, via setSleeping() and setAwake().
-  //
-  // - It is read by SamplerThread::Run().
-  //
-  // There are two cases where racing between threads can cause an issue.
-  //
-  // - If CanDuplicateLastSampleDueToSleep() returns false but that result is
-  //   invalidated before being acted upon, we will take a full sample
-  //   unnecessarily. This is additional work but won't cause any correctness
-  //   issues. (In actual fact, this case is impossible. In order to go from
-  //   CanDuplicateLastSampleDueToSleep() returning false to it returning true
-  //   requires an intermediate call to it in order for mSleep to go from
-  //   SLEEPING_NOT_OBSERVED to SLEEPING_OBSERVED.)
-  //
-  // - If CanDuplicateLastSampleDueToSleep() returns true but that result is
-  //   invalidated before being acted upon -- i.e. the thread wakes up before
-  //   DuplicateLastSample() is called -- we will duplicate the previous
-  //   sample. This is inaccurate, but only slightly... we will effectively
-  //   treat the thread as having slept a tiny bit longer than it really did.
-  //
-  // This latter inaccuracy could be avoided by moving the
-  // CanDuplicateLastSampleDueToSleep() check within the thread-freezing code,
-  // e.g. the section where Tick() is called. But that would reduce the
-  // effectiveness of the optimization because more code would have to be run
-  // before we can tell that duplication is allowed.
-  //
-  static const int AWAKE = 0;
-  static const int SLEEPING_NOT_OBSERVED = 1;
-  static const int SLEEPING_OBSERVED = 2;
-  mozilla::Atomic<int> mSleep;
-};
-
-// Contains data for partial profiles that get saved when
-// ThreadInfo::FlushSamplesAndMarkers gets called.
-struct PartialThreadProfile final
-{
-  PartialThreadProfile(mozilla::UniquePtr<char[]>&& aSamplesJSON,
-                       mozilla::UniquePtr<char[]>&& aMarkersJSON,
-                       mozilla::UniquePtr<UniqueStacks>&& aUniqueStacks)
-    : mSamplesJSON(mozilla::Move(aSamplesJSON))
-    , mMarkersJSON(mozilla::Move(aMarkersJSON))
-    , mUniqueStacks(mozilla::Move(aUniqueStacks))
-  {}
-
-  mozilla::UniquePtr<char[]> mSamplesJSON;
-  mozilla::UniquePtr<char[]> mMarkersJSON;
-  mozilla::UniquePtr<UniqueStacks> mUniqueStacks;
-};
-
-// This class contains the info for a single thread.
-//
-// Note: A thread's ThreadInfo can be held onto after the thread itself exits,
-// because we may need to output profiling information about that thread. But
-// some of the fields in this class are only relevant while the thread is
-// alive. It's possible that this class could be refactored so there is a
-// clearer split between those fields and the fields that are still relevant
-// after the thread exists.
+// This class contains information about a thread. It needs to be stored
+// across restarts of the profiler and can be useful even after the thread
+// has stopped running.
+// It uses threadsafe refcounting and only contains immutable data.
 class ThreadInfo final
 {
 public:
-  ThreadInfo(const char* aName, int aThreadId, bool aIsMainThread,
-             nsIEventTarget* aThread, void* aStackTop);
-
-  ~ThreadInfo();
-
-  const char* Name() const { return mName.get(); }
-
-  // This is a safe read even when the target thread is not blocked, as this
-  // thread id is never mutated.
-  int ThreadId() const { return RacyInfo()->ThreadId(); }
-
-  bool IsMainThread() const { return mIsMainThread; }
-
-  mozilla::NotNull<RacyThreadInfo*> RacyInfo() const { return mRacyInfo; }
-
-  void StartProfiling();
-  void StopProfiling();
-  bool IsBeingProfiled() { return mIsBeingProfiled; }
-
-  void NotifyUnregistered(uint64_t aBufferPosition)
-  {
-    mUnregisterTime = TimeStamp::Now();
-    mBufferPositionWhenUnregistered = mozilla::Some(aBufferPosition);
-  }
-  mozilla::Maybe<uint64_t> BufferPositionWhenUnregistered() { return mBufferPositionWhenUnregistered; }
-
-  PlatformData* GetPlatformData() const { return mPlatformData.get(); }
-  void* StackTop() const { return mStackTop; }
-
-  size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
-
-  mozilla::Maybe<uint64_t>& LastSample() { return mLastSample; }
-
-private:
-  mozilla::UniqueFreePtr<char> mName;
-  mozilla::TimeStamp mRegisterTime;
-  mozilla::TimeStamp mUnregisterTime;
-  mozilla::Maybe<uint64_t> mBufferPositionWhenUnregistered;
-  const bool mIsMainThread;
-  nsCOMPtr<nsIEventTarget> mThread;
-
-  // The thread's RacyThreadInfo. This is an owning pointer. It could be an
-  // inline member, but we don't do that because RacyThreadInfo is quite large
-  // (due to the PseudoStack within it), and we have ThreadInfo vectors and so
-  // we'd end up wasting a lot of space in those vectors for excess elements.
-  mozilla::NotNull<RacyThreadInfo*> mRacyInfo;
-
-  UniquePlatformData mPlatformData;
-  void* mStackTop;
-
-  //
-  // The following code is only used for threads that are being profiled, i.e.
-  // for which IsBeingProfiled() returns true.
-  //
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ThreadInfo)
 
-public:
-  void StreamJSON(const ProfileBuffer& aBuffer, SpliceableJSONWriter& aWriter,
-                  const mozilla::TimeStamp& aProcessStartTime,
-                  double aSinceTime);
-
-  // Call this method when the JS entries inside the buffer are about to
-  // become invalid, i.e., just before JS shutdown.
-  void FlushSamplesAndMarkers(const mozilla::TimeStamp& aProcessStartTime,
-                              ProfileBuffer& aBuffer);
-
-  // Returns nullptr if this is not the main thread or if this thread is not
-  // being profiled.
-  ThreadResponsiveness* GetThreadResponsiveness()
-  {
-    ThreadResponsiveness* responsiveness = mResponsiveness.ptrOr(nullptr);
-    MOZ_ASSERT(!responsiveness || mIsBeingProfiled);
-    return responsiveness;
-  }
-
-  // Set the JSContext of the thread to be sampled. Sampling cannot begin until
-  // this has been set.
-  void SetJSContext(JSContext* aContext)
+  ThreadInfo(const char* aName, int aThreadId, bool aIsMainThread)
+    : mName(aName)
+    , mRegisterTime(TimeStamp::Now())
+    , mThreadId(aThreadId)
+    , mIsMainThread(aIsMainThread)
   {
-    // This function runs on-thread.
-
-    MOZ_ASSERT(aContext && !mContext);
-
-    mContext = aContext;
-
-    // We give the JS engine a non-owning reference to the PseudoStack. It's
-    // important that the JS engine doesn't touch this once the thread dies.
-    js::SetContextProfilingStack(aContext, &RacyInfo()->PseudoStack());
-
-    PollJSSampling();
-  }
-
-  // Request that this thread start JS sampling. JS sampling won't actually
-  // start until a subsequent PollJSSampling() call occurs *and* mContext has
-  // been set.
-  void StartJSSampling()
-  {
-    // This function runs on-thread or off-thread.
-
-    MOZ_RELEASE_ASSERT(mJSSampling == INACTIVE ||
-                       mJSSampling == INACTIVE_REQUESTED);
-    mJSSampling = ACTIVE_REQUESTED;
-  }
-
-  // Request that this thread stop JS sampling. JS sampling won't actually stop
-  // until a subsequent PollJSSampling() call occurs.
-  void StopJSSampling()
-  {
-    // This function runs on-thread or off-thread.
-
-    MOZ_RELEASE_ASSERT(mJSSampling == ACTIVE ||
-                       mJSSampling == ACTIVE_REQUESTED);
-    mJSSampling = INACTIVE_REQUESTED;
+    // I don't know if we can assert this. But we should warn.
+    MOZ_ASSERT(aThreadId >= 0, "native thread ID is < 0");
+    MOZ_ASSERT(aThreadId <= INT32_MAX, "native thread ID is > INT32_MAX");
   }
 
-  // Poll to see if JS sampling should be started/stopped.
-  void PollJSSampling()
-  {
-    // This function runs on-thread.
-
-    // We can't start/stop profiling until we have the thread's JSContext.
-    if (mContext) {
-      // It is possible for mJSSampling to go through the following sequences.
-      //
-      // - INACTIVE, ACTIVE_REQUESTED, INACTIVE_REQUESTED, INACTIVE
-      //
-      // - ACTIVE, INACTIVE_REQUESTED, ACTIVE_REQUESTED, ACTIVE
-      //
-      // Therefore, the if and else branches here aren't always interleaved.
-      // This is ok because the JS engine can handle that.
-      //
-      if (mJSSampling == ACTIVE_REQUESTED) {
-        mJSSampling = ACTIVE;
-        js::EnableContextProfilingStack(mContext, true);
-        js::RegisterContextProfilingEventMarker(mContext, profiler_add_marker);
-
-      } else if (mJSSampling == INACTIVE_REQUESTED) {
-        mJSSampling = INACTIVE;
-        js::EnableContextProfilingStack(mContext, false);
-      }
-    }
-  }
-
-private:
-  bool mIsBeingProfiled;
-
-  // JS frames in the buffer may require a live JSRuntime to stream (e.g.,
-  // stringifying JIT frames). In the case of JSRuntime destruction,
-  // FlushSamplesAndMarkers should be called to save them. These are spliced
-  // into the final stream.
-  UniquePtr<PartialThreadProfile> mPartialProfile;
-
-  // This is used only for nsIThreads.
-  mozilla::Maybe<ThreadResponsiveness> mResponsiveness;
-
-public:
-  // If this is a JS thread, this is its JSContext, which is required for any
-  // JS sampling.
-  JSContext* mContext;
+  const char* Name() const { return mName.get(); }
+  mozilla::TimeStamp RegisterTime() const { return mRegisterTime; }
+  int ThreadId() const { return mThreadId; }
+  bool IsMainThread() const { return mIsMainThread; }
 
 private:
-  // The profiler needs to start and stop JS sampling of JS threads at various
-  // times. However, the JS engine can only do the required actions on the
-  // JS thread itself ("on-thread"), not from another thread ("off-thread").
-  // Therefore, we have the following two-step process.
-  //
-  // - The profiler requests (on-thread or off-thread) that the JS sampling be
-  //   started/stopped, by changing mJSSampling to the appropriate REQUESTED
-  //   state.
-  //
-  // - The relevant JS thread polls (on-thread) for changes to mJSSampling.
-  //   When it sees a REQUESTED state, it performs the appropriate actions to
-  //   actually start/stop JS sampling, and changes mJSSampling out of the
-  //   REQUESTED state.
-  //
-  // The state machine is as follows.
-  //
-  //             INACTIVE --> ACTIVE_REQUESTED
-  //                  ^       ^ |
-  //                  |     _/  |
-  //                  |   _/    |
-  //                  |  /      |
-  //                  | v       v
-  //   INACTIVE_REQUESTED <-- ACTIVE
-  //
-  // The polling is done in the following two ways.
-  //
-  // - Via the interrupt callback mechanism; the JS thread must call
-  //   profiler_js_interrupt_callback() from its own interrupt callback.
-  //   This is how sampling must be started/stopped for threads where the
-  //   request was made off-thread.
-  //
-  // - When {Start,Stop}JSSampling() is called on-thread, we can immediately
-  //   follow it with a PollJSSampling() call to avoid the delay between the
-  //   two steps. Likewise, setJSContext() calls PollJSSampling().
-  //
-  // One non-obvious thing about all this: these JS sampling requests are made
-  // on all threads, even non-JS threads. mContext needs to also be set (via
-  // setJSContext(), which can only happen for JS threads) for any JS sampling
-  // to actually happen.
-  //
-  enum {
-    INACTIVE = 0,
-    ACTIVE_REQUESTED = 1,
-    ACTIVE = 2,
-    INACTIVE_REQUESTED = 3,
-  } mJSSampling;
+  ~ThreadInfo() {}
 
-  // When sampling, this holds the position in ActivePS::mBuffer of the most
-  // recent sample for this thread, or Nothing() if there is no sample for this
-  // thread in the buffer.
-  mozilla::Maybe<uint64_t> mLastSample;
+  const nsCString mName;
+  const mozilla::TimeStamp mRegisterTime;
+  const int mThreadId;
+  const bool mIsMainThread;
 };
 
-void
-StreamSamplesAndMarkers(const char* aName, int aThreadId,
-                        const ProfileBuffer& aBuffer,
-                        SpliceableJSONWriter& aWriter,
-                        const mozilla::TimeStamp& aProcessStartTime,
-                        const TimeStamp& aRegisterTime,
-                        const TimeStamp& aUnregisterTime,
-                        double aSinceTime,
-                        JSContext* aContext,
-                        UniquePtr<char[]>&& aPartialSamplesJSON,
-                        UniquePtr<char[]>&& aPartialMarkersJSON,
-                        UniqueStacks& aUniqueStacks);
-
 #endif  // ThreadInfo_h
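
The net effect of the header split above can be summarized as follows. This is an editor's declaration-only sketch, not code from the patch: the member lists are limited to what is visible elsewhere in this diff, and the exact signatures (return types, const-ness, ownership helpers) live in RegisteredThread.h and ProfiledThreadData.h, which are not shown here.

// ThreadInfo (above): immutable, threadsafe-refcounted identity of a thread
// (name, thread ID, is-main-thread flag, registration time). It is shared by
// the two classes below and can outlive both.

// RegisteredThread: one instance per currently-registered thread, owned by
// CorePS::mRegisteredThreads and destroyed when the thread unregisters.
class RegisteredThread
{
public:
  ThreadInfo* Info() const;
  class RacyRegisteredThread& RacyRegisteredThread(); // lock-free part:
                                                      // PseudoStack, pending
                                                      // markers, sleep state
  PlatformData* GetPlatformData() const;
  const void* StackTop() const;
  JSContext* GetJSContext() const;
  void StartJSSampling();
  void StopJSSampling();
  void PollJSSampling();
};

// ProfiledThreadData: one instance per thread that is (or was) being
// profiled, owned by ActivePS and kept alive for as long as the profile
// buffer may still contain data for the thread.
class ProfiledThreadData
{
public:
  ProfiledThreadData(ThreadInfo* aThreadInfo, nsIEventTarget* aEventTarget);
  ThreadInfo* Info();
  void NotifyUnregistered(uint64_t aBufferPosition);
  mozilla::Maybe<uint64_t> BufferPositionWhenUnregistered();
  mozilla::Maybe<uint64_t>& LastSample();
  ThreadResponsiveness* GetThreadResponsiveness();
  void StreamJSON(const ProfileBuffer& aBuffer, JSContext* aCx,
                  SpliceableJSONWriter& aWriter,
                  const mozilla::TimeStamp& aProcessStartTime,
                  double aSinceTime);
};
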
--- a/tools/profiler/core/platform-linux-android.cpp
+++ b/tools/profiler/core/platform-linux-android.cpp
@@ -291,27 +291,27 @@ Sampler::Disable(PSLockRef aLock)
   // Restore old signal handler. This is global state so it's important that
   // we do it now, while gPSMutex is locked.
   sigaction(SIGPROF, &mOldSigprofHandler, 0);
 }
 
 template<typename Func>
 void
 Sampler::SuspendAndSampleAndResumeThread(PSLockRef aLock,
-                                         const ThreadInfo& aThreadInfo,
+                                         const RegisteredThread& aRegisteredThread,
                                          const Func& aProcessRegs)
 {
   // Only one sampler thread can be sampling at once.  So we expect to have
   // complete control over |sSigHandlerCoordinator|.
   MOZ_ASSERT(!sSigHandlerCoordinator);
 
   if (mSamplerTid == -1) {
     mSamplerTid = gettid();
   }
-  int sampleeTid = aThreadInfo.ThreadId();
+  int sampleeTid = aRegisteredThread.Info()->ThreadId();
   MOZ_RELEASE_ASSERT(sampleeTid != mSamplerTid);
 
   //----------------------------------------------------------------//
   // Suspend the samplee thread and get its context.
 
   SigHandlerCoordinator coord;   // on sampler thread's stack
   sSigHandlerCoordinator = &coord;
 
--- a/tools/profiler/core/platform-macos.cpp
+++ b/tools/profiler/core/platform-macos.cpp
@@ -79,21 +79,21 @@ Sampler::Sampler(PSLockRef aLock)
 void
 Sampler::Disable(PSLockRef aLock)
 {
 }
 
 template<typename Func>
 void
 Sampler::SuspendAndSampleAndResumeThread(PSLockRef aLock,
-                                         const ThreadInfo& aThreadInfo,
+                                         const RegisteredThread& aRegisteredThread,
                                          const Func& aProcessRegs)
 {
   thread_act_t samplee_thread =
-    aThreadInfo.GetPlatformData()->ProfiledThread();
+    aRegisteredThread.GetPlatformData()->ProfiledThread();
 
   //----------------------------------------------------------------//
   // Suspend the samplee thread and get its context.
 
   // We're using thread_suspend on OS X because pthread_kill (which is what we
   // at one time used on Linux) has less consistent performance and causes
   // strange crashes, see bug 1166778 and bug 1166808.  thread_suspend
   // is also just a lot simpler to use.
--- a/tools/profiler/core/platform-win32.cpp
+++ b/tools/profiler/core/platform-win32.cpp
@@ -122,20 +122,20 @@ Sampler::Sampler(PSLockRef aLock)
 void
 Sampler::Disable(PSLockRef aLock)
 {
 }
 
 template<typename Func>
 void
 Sampler::SuspendAndSampleAndResumeThread(PSLockRef aLock,
-                                         const ThreadInfo& aThreadInfo,
+                                         const RegisteredThread& aRegisteredThread,
                                          const Func& aProcessRegs)
 {
-  HANDLE profiled_thread = aThreadInfo.GetPlatformData()->ProfiledThread();
+  HANDLE profiled_thread = aRegisteredThread.GetPlatformData()->ProfiledThread();
   if (profiled_thread == nullptr) {
     return;
   }
 
   // Context used for sampling the register state of the profiled thread.
   CONTEXT context;
   memset(&context, 0, sizeof(context));
 
--- a/tools/profiler/core/platform.cpp
+++ b/tools/profiler/core/platform.cpp
@@ -204,44 +204,33 @@ typedef const PSAutoLock& PSLockRef;
 // accidental unlocked accesses to global state. There are ways to circumvent
 // this mechanism, but please don't do so without *very* good reason and a
 // detailed explanation.
 //
 // The exceptions to this rule:
 //
 // - mProcessStartTime, because it's immutable;
 //
-// - each thread's RacyThreadInfo object is accessible without locking via
-//   TLSInfo::RacyThreadInfo().
+// - each thread's RacyRegisteredThread object is accessible without locking via
+//   TLSRegisteredThread::RacyRegisteredThread().
 class CorePS
 {
 private:
   CorePS()
     : mProcessStartTime(TimeStamp::ProcessCreation())
 #ifdef USE_LUL_STACKWALK
     , mLul(nullptr)
 #endif
   {}
 
   ~CorePS()
   {
-    while (!mLiveThreads.empty()) {
-      delete mLiveThreads.back();
-      mLiveThreads.pop_back();
-    }
-
-    while (!mDeadThreads.empty()) {
-      delete mDeadThreads.back();
-      mDeadThreads.pop_back();
-    }
   }
 
 public:
-  typedef std::vector<ThreadInfo*> ThreadVector;
-
   static void Create(PSLockRef aLock) { sInstance = new CorePS(); }
 
   static void Destroy(PSLockRef aLock)
   {
     delete sInstance;
     sInstance = nullptr;
   }
 
@@ -250,60 +239,50 @@ public:
   // thread that we don't have to worry about it being racy.
   static bool Exists() { return !!sInstance; }
 
   static void AddSizeOf(PSLockRef, MallocSizeOf aMallocSizeOf,
                         size_t& aProfSize, size_t& aLulSize)
   {
     aProfSize += aMallocSizeOf(sInstance);
 
-    for (uint32_t i = 0; i < sInstance->mLiveThreads.size(); i++) {
-      aProfSize +=
-        sInstance->mLiveThreads.at(i)->SizeOfIncludingThis(aMallocSizeOf);
-    }
-
-    for (uint32_t i = 0; i < sInstance->mDeadThreads.size(); i++) {
-      aProfSize +=
-        sInstance->mDeadThreads.at(i)->SizeOfIncludingThis(aMallocSizeOf);
+    for (auto& registeredThread : sInstance->mRegisteredThreads) {
+      aProfSize += registeredThread->SizeOfIncludingThis(aMallocSizeOf);
     }
 
     // Measurement of the following things may be added later if DMD finds it
     // is worthwhile:
-    // - CorePS::mLiveThreads itself (its elements' children are measured
+    // - CorePS::mRegisteredThreads itself (its elements' children are measured
     //   above)
-    // - CorePS::mDeadThreads itself (ditto)
     // - CorePS::mInterposeObserver
 
 #if defined(USE_LUL_STACKWALK)
     if (sInstance->mLul) {
       aLulSize += sInstance->mLul->SizeOfIncludingThis(aMallocSizeOf);
     }
 #endif
   }
 
   // No PSLockRef is needed for this field because it's immutable.
   PS_GET_LOCKLESS(TimeStamp, ProcessStartTime)
 
-  PS_GET(ThreadVector&, LiveThreads)
-  PS_GET(ThreadVector&, DeadThreads)
-
-  static void DiscardExpiredDeadThreads(PSLockRef, uint64_t aBufferRangeStart)
+  PS_GET(const nsTArray<UniquePtr<RegisteredThread>>&, RegisteredThreads)
+
+  static void AppendRegisteredThread(PSLockRef, UniquePtr<RegisteredThread>&& aRegisteredThread)
   {
-    // Discard any dead threads that were unregistered before aBufferRangeStart.
-    ThreadVector& deadThreads = sInstance->mDeadThreads;
-    for (size_t i = 0; i < deadThreads.size(); i++) {
-      Maybe<uint64_t> bufferPosition =
-        deadThreads.at(i)->BufferPositionWhenUnregistered();
-      MOZ_RELEASE_ASSERT(bufferPosition, "should have unregistered this thread");
-      if (*bufferPosition < aBufferRangeStart) {
-        delete deadThreads.at(i);
-        deadThreads.erase(deadThreads.begin() + i);
-        i--;
-      }
-    }
+    sInstance->mRegisteredThreads.AppendElement(Move(aRegisteredThread));
+  }
+
+  static void RemoveRegisteredThread(PSLockRef, RegisteredThread* aRegisteredThread)
+  {
+    // Remove aRegisteredThread from mRegisteredThreads.
+    // Can't use RemoveElement() because we can't equality-compare a UniquePtr
+    // to a raw pointer.
+    sInstance->mRegisteredThreads.RemoveElementsBy(
+      [&](UniquePtr<RegisteredThread>& rt) { return rt.get() == aRegisteredThread; });
   }
 
 #ifdef USE_LUL_STACKWALK
   static lul::LUL* Lul(PSLockRef) { return sInstance->mLul.get(); }
   static void SetLul(PSLockRef, UniquePtr<lul::LUL> aLul)
   {
     sInstance->mLul = Move(aLul);
   }
@@ -311,37 +290,39 @@ public:
 
 private:
   // The singleton instance
   static CorePS* sInstance;
 
   // The time that the process started.
   const TimeStamp mProcessStartTime;
 
-  // Info on all the registered threads, both live and dead. ThreadIds in
-  // mLiveThreads are unique. ThreadIds in mDeadThreads may not be, because
-  // ThreadIds can be reused. IsBeingProfiled() is true for all ThreadInfos in
-  // mDeadThreads because we don't hold on to ThreadInfos for non-profiled dead
-  // threads.
-  ThreadVector mLiveThreads;
-  ThreadVector mDeadThreads;
+  // Info on all the registered threads.
+  // ThreadIds in mRegisteredThreads are unique.
+  nsTArray<UniquePtr<RegisteredThread>> mRegisteredThreads;
 
 #ifdef USE_LUL_STACKWALK
   // LUL's state. Null prior to the first activation, non-null thereafter.
   UniquePtr<lul::LUL> mLul;
 #endif
 };
 
 CorePS* CorePS::sInstance = nullptr;
 
 class SamplerThread;
 
 static SamplerThread*
 NewSamplerThread(PSLockRef aLock, uint32_t aGeneration, double aInterval);
 
+struct LiveProfiledThreadData
+{
+  RegisteredThread* mRegisteredThread;
+  UniquePtr<ProfiledThreadData> mProfiledThreadData;
+};
+
 // This class contains the profiler's global state that is valid only when the
 // profiler is active. When not instantiated, the profiler is inactive.
 //
 // Accesses to ActivePS are guarded by gPSMutex, in much the same fashion as
 // CorePS.
 //
 class ActivePS
 {
@@ -489,16 +470,22 @@ public:
   }
 
   static size_t SizeOf(PSLockRef, MallocSizeOf aMallocSizeOf)
   {
     size_t n = aMallocSizeOf(sInstance);
 
     n += sInstance->mBuffer->SizeOfIncludingThis(aMallocSizeOf);
 
+    // Measurement of the following members may be added later if DMD finds it
+    // is worthwhile:
+    // - mLiveProfiledThreads (both the array itself, and the contents)
+    // - mDeadProfiledThreads (both the array itself, and the contents)
+    //
+
     return n;
   }
 
   static bool ShouldProfileThread(PSLockRef aLock, ThreadInfo* aInfo)
   {
     MOZ_RELEASE_ASSERT(sInstance);
 
     return ((aInfo->IsMainThread() || FeatureThreads(aLock)) &&
@@ -522,22 +509,100 @@ public:
   PROFILER_FOR_EACH_FEATURE(PS_GET_FEATURE)
 
   #undef PS_GET_FEATURE
 
   PS_GET(const Vector<std::string>&, Filters)
 
   static ProfileBuffer& Buffer(PSLockRef) { return *sInstance->mBuffer.get(); }
 
+  static const nsTArray<LiveProfiledThreadData>& LiveProfiledThreads(PSLockRef)
+  {
+    return sInstance->mLiveProfiledThreads;
+  }
+
+  // Returns an array containing (RegisteredThread*, ProfiledThreadData*) pairs
+  // for all threads that should be included in a profile, both for threads
+  // that are still registered, and for threads that have been unregistered but
+  // still have data in the buffer.
+  // For threads that have already been unregistered, the RegisteredThread
+  // pointer will be null.
+  // Do not hold on to the return value across thread registration or profiler
+  // restarts.
+  static nsTArray<Pair<RegisteredThread*, ProfiledThreadData*>> ProfiledThreads(PSLockRef)
+  {
+    nsTArray<Pair<RegisteredThread*, ProfiledThreadData*>> array;
+    for (auto& t : sInstance->mLiveProfiledThreads) {
+      array.AppendElement(MakePair(t.mRegisteredThread, t.mProfiledThreadData.get()));
+    }
+    for (auto& t : sInstance->mDeadProfiledThreads) {
+      array.AppendElement(MakePair((RegisteredThread*)nullptr, t.get()));
+    }
+    return array;
+  }
+
+  // Do a linear search through mLiveProfiledThreads to find the
+  // ProfiledThreadData object for a RegisteredThread.
+  static ProfiledThreadData* GetProfiledThreadData(PSLockRef,
+                                                   RegisteredThread* aRegisteredThread)
+  {
+    for (size_t i = 0; i < sInstance->mLiveProfiledThreads.Length(); i++) {
+      LiveProfiledThreadData& thread = sInstance->mLiveProfiledThreads[i];
+      if (thread.mRegisteredThread == aRegisteredThread) {
+        return thread.mProfiledThreadData.get();
+      }
+    }
+    return nullptr;
+  }
+
+  static void AddLiveProfiledThread(PSLockRef, RegisteredThread* aRegisteredThread,
+                                    UniquePtr<ProfiledThreadData>&& aProfiledThreadData)
+  {
+    sInstance->mLiveProfiledThreads.AppendElement(
+      LiveProfiledThreadData{ aRegisteredThread, Move(aProfiledThreadData) });
+  }
+
+  static void UnregisterThread(PSLockRef aLockRef, RegisteredThread* aRegisteredThread)
+  {
+    DiscardExpiredDeadProfiledThreads(aLockRef);
+
+    // Find the right entry in the mLiveProfiledThreads array and remove it,
+    // moving the ProfiledThreadData object for the thread into the
+    // mDeadProfiledThreads array. The thread's RegisteredThread is owned by
+    // CorePS::mRegisteredThreads and gets destroyed during deregistration.
+    for (size_t i = 0; i < sInstance->mLiveProfiledThreads.Length(); i++) {
+      LiveProfiledThreadData& thread = sInstance->mLiveProfiledThreads[i];
+      if (thread.mRegisteredThread == aRegisteredThread) {
+        thread.mProfiledThreadData->NotifyUnregistered(sInstance->mBuffer->mRangeEnd);
+        sInstance->mDeadProfiledThreads.AppendElement(Move(thread.mProfiledThreadData));
+        sInstance->mLiveProfiledThreads.RemoveElementAt(i);
+        return;
+      }
+    }
+  }
+
   PS_GET_AND_SET(bool, IsPaused)
 
 #if defined(GP_OS_linux)
   PS_GET_AND_SET(bool, WasPaused)
 #endif
 
+  static void DiscardExpiredDeadProfiledThreads(PSLockRef)
+  {
+    uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
+    // Discard any dead threads that were unregistered before bufferRangeStart.
+    sInstance->mDeadProfiledThreads.RemoveElementsBy(
+      [bufferRangeStart](UniquePtr<ProfiledThreadData>& aProfiledThreadData) {
+        Maybe<uint64_t> bufferPosition =
+          aProfiledThreadData->BufferPositionWhenUnregistered();
+        MOZ_RELEASE_ASSERT(bufferPosition, "should have unregistered this thread");
+        return *bufferPosition < bufferRangeStart;
+      });
+  }
+
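Taken together, the methods above give a profiled thread's data a simple lifecycle. A hedged usage sketch follows, with variable names borrowed from locked_register_thread() later in this patch and made-up buffer positions for illustration:

// 1. When a thread we want to profile registers, pair its RegisteredThread
//    with a fresh ProfiledThreadData.
ActivePS::AddLiveProfiledThread(lock, registeredThread.get(),
  MakeUnique<ProfiledThreadData>(info, eventTarget));

// 2. When the thread unregisters, its ProfiledThreadData records the buffer
//    position at that moment (say 3000) and moves to mDeadProfiledThreads;
//    the samples already in the buffer remain usable.
ActivePS::UnregisterThread(lock, registeredThread.get());

// 3. Once the circular buffer has wrapped far enough that mRangeStart exceeds
//    that position (say it is now 5000 > 3000), every sample belonging to the
//    dead thread has been overwritten, so its ProfiledThreadData is dropped.
ActivePS::DiscardExpiredDeadProfiledThreads(lock);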
 private:
   // The singleton instance.
   static ActivePS* sInstance;
 
   // We need to track activity generations. If we didn't we could have the
   // following scenario.
   //
   // - profiler_stop() locks gPSMutex, de-instantiates ActivePS, unlocks
@@ -568,16 +633,24 @@ private:
 
   // Substrings of names of threads we want to profile.
   Vector<std::string> mFilters;
 
   // The buffer into which all samples are recorded. Always non-null. Always
-  // used in conjunction with CorePS::m{Live,Dead}Threads.
+  // used in conjunction with m{Live,Dead}ProfiledThreads below.
   const UniquePtr<ProfileBuffer> mBuffer;
 
+  // ProfiledThreadData objects for any threads that were profiled at any point
+  // during this run of the profiler:
+  //  - mLiveProfiledThreads contains all threads that are still registered, and
+  //  - mDeadProfiledThreads contains all threads that have already been
+  //    unregistered but for which there is still data in the profile buffer.
+  nsTArray<LiveProfiledThreadData> mLiveProfiledThreads;
+  nsTArray<UniquePtr<ProfiledThreadData>> mDeadProfiledThreads;
+
   // The current sampler thread. This class is not responsible for destroying
   // the SamplerThread object; the Destroy() method returns it so the caller
   // can destroy it.
   SamplerThread* const mSamplerThread;
 
   // The interposer that records main thread I/O.
   const RefPtr<ProfilerIOInterposeObserver> mInterposeObserver;
 
@@ -598,72 +671,82 @@ uint32_t ActivePS::sNextGeneration = 0;
 #undef PS_GET_LOCKLESS
 #undef PS_GET_AND_SET
 
 // The mutex that guards accesses to CorePS and ActivePS.
 static PSMutex gPSMutex;
 
 Atomic<uint32_t> RacyFeatures::sActiveAndFeatures(0);
 
-// Each live thread has a ThreadInfo, and we store a reference to it in TLS.
+// Each live thread has a RegisteredThread, and we store a reference to it in TLS.
 // This class encapsulates that TLS.
-class TLSInfo
+class TLSRegisteredThread
 {
 public:
   static bool Init(PSLockRef)
   {
-    bool ok1 = sThreadInfo.init();
+    bool ok1 = sRegisteredThread.init();
     bool ok2 = AutoProfilerLabel::sPseudoStack.init();
     return ok1 && ok2;
   }
 
-  // Get the entire ThreadInfo. Accesses are guarded by gPSMutex.
-  static ThreadInfo* Info(PSLockRef) { return sThreadInfo.get(); }
-
-  // Get only the RacyThreadInfo. Accesses are not guarded by gPSMutex.
-  static RacyThreadInfo* RacyInfo()
+  // Get the entire RegisteredThread. Accesses are guarded by gPSMutex.
+  static class RegisteredThread* RegisteredThread(PSLockRef)
   {
-    ThreadInfo* info = sThreadInfo.get();
-    return info ? info->RacyInfo().get() : nullptr;
+    return sRegisteredThread.get();
+  }
+
+  // Get only the RacyRegisteredThread. Accesses are not guarded by gPSMutex.
+  static class RacyRegisteredThread* RacyRegisteredThread()
+  {
+    class RegisteredThread* registeredThread = sRegisteredThread.get();
+    return registeredThread ? &registeredThread->RacyRegisteredThread()
+                            : nullptr;
   }
 
-  // Get only the PseudoStack. Accesses are not guarded by gPSMutex. RacyInfo()
-  // can also be used to get the PseudoStack, but that is marginally slower
-  // because it requires an extra pointer indirection.
+  // Get only the PseudoStack. Accesses are not guarded by gPSMutex.
+  // RacyRegisteredThread() can also be used to get the PseudoStack, but that
+  // is marginally slower because it requires an extra pointer indirection.
   static PseudoStack* Stack() { return AutoProfilerLabel::sPseudoStack.get(); }
 
-  static void SetInfo(PSLockRef, ThreadInfo* aInfo)
+  static void SetRegisteredThread(PSLockRef,
+                                  class RegisteredThread* aRegisteredThread)
   {
-    sThreadInfo.set(aInfo);
+    sRegisteredThread.set(aRegisteredThread);
     AutoProfilerLabel::sPseudoStack.set(
-      aInfo ? &aInfo->RacyInfo()->PseudoStack() : nullptr);
+      aRegisteredThread
+        ? &aRegisteredThread->RacyRegisteredThread().PseudoStack()
+        : nullptr);
   }
 
 private:
-  // This is a non-owning reference to the ThreadInfo; CorePS::mLiveThreads is
-  // the owning reference. On thread destruction, this reference is cleared and
-  // the ThreadInfo is destroyed or transferred to CorePS::mDeadThreads.
-  static MOZ_THREAD_LOCAL(ThreadInfo*) sThreadInfo;
+  // This is a non-owning reference to the RegisteredThread;
+  // CorePS::mRegisteredThreads is the owning reference. On thread
+  // deregistration, this reference is cleared and the RegisteredThread is
+  // destroyed.
+  static MOZ_THREAD_LOCAL(class RegisteredThread*) sRegisteredThread;
 };
 
-MOZ_THREAD_LOCAL(ThreadInfo*) TLSInfo::sThreadInfo;
-
-// Although you can access a thread's PseudoStack via TLSInfo::sThreadInfo, we
-// also have a second TLS pointer directly to the PseudoStack. Here's why.
+MOZ_THREAD_LOCAL(RegisteredThread*) TLSRegisteredThread::sRegisteredThread;
+
+// Although you can access a thread's PseudoStack via
+// TLSRegisteredThread::sRegisteredThread, we also have a second TLS pointer
+// directly to the PseudoStack. Here's why.
 //
 // - We need to be able to push to and pop from the PseudoStack in
 //   AutoProfilerLabel.
 //
 // - The class functions are hot and must be defined in GeckoProfiler.h so they
 //   can be inlined.
 //
-// - We don't want to expose TLSInfo (and ThreadInfo) in GeckoProfiler.h.
+// - We don't want to expose TLSRegisteredThread (and RegisteredThread) in
+//   GeckoProfiler.h.
 //
 // This second pointer isn't ideal, but does provide a way to satisfy those
-// constraints. TLSInfo is responsible for updating it.
+// constraints. TLSRegisteredThread is responsible for updating it.
 MOZ_THREAD_LOCAL(PseudoStack*) AutoProfilerLabel::sPseudoStack;
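For reference, the TLS split above leaves callers with two ways to reach per-thread profiler state. A hedged sketch of the two access patterns (the wrapper function is hypothetical, purely for illustration; the accessors and gPSMutex are the ones defined above):

void ExampleAccessPatterns()  // hypothetical function, not part of the patch
{
  // Hot, lock-free path: only the PseudoStack, via the dedicated TLS slot.
  // This is what AutoProfilerLabel and signal-safe code rely on.
  PseudoStack* stack = TLSRegisteredThread::Stack();
  (void)stack;  // push/pop pseudo-frames here without taking any lock

  // Cold path: the full RegisteredThread, which requires holding gPSMutex.
  PSAutoLock lock(gPSMutex);
  RegisteredThread* registeredThread =
    TLSRegisteredThread::RegisteredThread(lock);
  if (registeredThread) {
    registeredThread->PollJSSampling();
  }
}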
 
 // The name of the main thread.
 static const char* const kMainThreadName = "GeckoMain";
 
 ////////////////////////////////////////////////////////////////////////
 // BEGIN sampling/unwinding code
 
@@ -729,28 +812,29 @@ struct AutoWalkJSStack
     }
   }
 };
 
 // Merges the pseudo-stack, native stack, and JS stack, outputting the details
 // to aCollector.
 static void
 MergeStacks(uint32_t aFeatures, bool aIsSynchronous,
-            const ThreadInfo& aThreadInfo, const Registers& aRegs,
+            const RegisteredThread& aRegisteredThread, const Registers& aRegs,
             const NativeStack& aNativeStack,
             ProfilerStackCollector& aCollector)
 {
   // WARNING: this function runs within the profiler's "critical section".
   // WARNING: this function might be called while the profiler is inactive, and
   //          cannot rely on ActivePS.
 
-  PseudoStack& pseudoStack = aThreadInfo.RacyInfo()->PseudoStack();
-  js::ProfileEntry* pseudoEntries = pseudoStack.entries;
+  const PseudoStack& pseudoStack =
+    aRegisteredThread.RacyRegisteredThread().PseudoStack();
+  const js::ProfileEntry* pseudoEntries = pseudoStack.entries;
   uint32_t pseudoCount = pseudoStack.stackSize();
-  JSContext* context = aThreadInfo.mContext;
+  JSContext* context = aRegisteredThread.GetJSContext();
 
   // Make a copy of the JS stack into a JSFrame array. This is necessary since,
   // like the native stack, the JS stack is iterated youngest-to-oldest and we
   // need to iterate oldest-to-youngest when adding entries to aInfo.
 
   // Non-periodic sampling passes Nothing() as the buffer write position to
   // ProfilingFrameIterator to avoid incorrectly resetting the buffer position
   // of sampled JIT entries inside the JS engine.
@@ -810,17 +894,17 @@ MergeStacks(uint32_t aFeatures, bool aIs
   // Iterate as long as there is at least one frame remaining.
   while (pseudoIndex != pseudoCount || jsIndex >= 0 || nativeIndex >= 0) {
     // There are 1 to 3 frames available. Find and add the oldest.
     uint8_t* pseudoStackAddr = nullptr;
     uint8_t* jsStackAddr = nullptr;
     uint8_t* nativeStackAddr = nullptr;
 
     if (pseudoIndex != pseudoCount) {
-      js::ProfileEntry& pseudoEntry = pseudoEntries[pseudoIndex];
+      const js::ProfileEntry& pseudoEntry = pseudoEntries[pseudoIndex];
 
       if (pseudoEntry.isCpp()) {
         lastPseudoCppStackAddr = (uint8_t*) pseudoEntry.stackAddress();
       }
 
       // Skip any JS_OSR frames. Such frames are used when the JS interpreter
       // enters a jit frame on a loop edge (via on-stack-replacement, or OSR).
       // To avoid both the pseudoframe and jit frame being recorded (and
@@ -860,17 +944,17 @@ MergeStacks(uint32_t aFeatures, bool aIs
     MOZ_ASSERT_IF(jsStackAddr, jsStackAddr != pseudoStackAddr &&
                                jsStackAddr != nativeStackAddr);
     MOZ_ASSERT_IF(nativeStackAddr, nativeStackAddr != pseudoStackAddr &&
                                    nativeStackAddr != jsStackAddr);
 
     // Check to see if pseudoStack frame is top-most.
     if (pseudoStackAddr > jsStackAddr && pseudoStackAddr > nativeStackAddr) {
       MOZ_ASSERT(pseudoIndex < pseudoCount);
-      js::ProfileEntry& pseudoEntry = pseudoEntries[pseudoIndex];
+      const js::ProfileEntry& pseudoEntry = pseudoEntries[pseudoIndex];
 
       // Pseudo-frames with the CPP_MARKER_FOR_JS kind are just annotations and
       // should not be recorded in the profile.
       if (pseudoEntry.kind() != js::ProfileEntry::Kind::CPP_MARKER_FOR_JS) {
         // The JIT only allows the top-most entry to have a nullptr pc.
         MOZ_ASSERT_IF(pseudoEntry.isJs() && pseudoEntry.script() && !pseudoEntry.pc(),
                       &pseudoEntry == &pseudoStack.entries[pseudoStack.stackSize() - 1]);
         aCollector.CollectPseudoEntry(pseudoEntry);
@@ -947,85 +1031,86 @@ StackWalkCallback(uint32_t aFrameNumber,
   nativeStack->mSPs[nativeStack->mCount] = aSP;
   nativeStack->mPCs[nativeStack->mCount] = aPC;
   nativeStack->mCount++;
 }
 #endif
 
 #if defined(USE_FRAME_POINTER_STACK_WALK)
 static void
-DoFramePointerBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
+DoFramePointerBacktrace(PSLockRef aLock, const RegisteredThread& aRegisteredThread,
                         const Registers& aRegs, NativeStack& aNativeStack)
 {
   // WARNING: this function runs within the profiler's "critical section".
   // WARNING: this function might be called while the profiler is inactive, and
   //          cannot rely on ActivePS.
 
   // Start with the current function. We use 0 as the frame number here because
   // the FramePointerStackWalk() call below will use 1..N. This is a bit weird
   // but it doesn't matter because StackWalkCallback() doesn't use the frame
   // number argument.
   StackWalkCallback(/* frameNum */ 0, aRegs.mPC, aRegs.mSP, &aNativeStack);
 
   uint32_t maxFrames = uint32_t(MAX_NATIVE_FRAMES - aNativeStack.mCount);
 
-  void* stackEnd = aThreadInfo.StackTop();
+  const void* stackEnd = aRegisteredThread.StackTop();
   if (aRegs.mFP >= aRegs.mSP && aRegs.mFP <= stackEnd) {
     FramePointerStackWalk(StackWalkCallback, /* skipFrames */ 0, maxFrames,
                           &aNativeStack, reinterpret_cast<void**>(aRegs.mFP),
-                          stackEnd);
+                          const_cast<void*>(stackEnd));
   }
 }
 #endif
 
 #if defined(USE_MOZ_STACK_WALK)
 static void
-DoMozStackWalkBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
+DoMozStackWalkBacktrace(PSLockRef aLock, const RegisteredThread& aRegisteredThread,
                         const Registers& aRegs, NativeStack& aNativeStack)
 {
   // WARNING: this function runs within the profiler's "critical section".
   // WARNING: this function might be called while the profiler is inactive, and
   //          cannot rely on ActivePS.
 
   // Start with the current function. We use 0 as the frame number here because
   // the MozStackWalkThread() call below will use 1..N. This is a bit weird but
   // it doesn't matter because StackWalkCallback() doesn't use the frame number
   // argument.
   StackWalkCallback(/* frameNum */ 0, aRegs.mPC, aRegs.mSP, &aNativeStack);
 
   uint32_t maxFrames = uint32_t(MAX_NATIVE_FRAMES - aNativeStack.mCount);
 
-  HANDLE thread = GetThreadHandle(aThreadInfo.GetPlatformData());
+  HANDLE thread = GetThreadHandle(aRegisteredThread.GetPlatformData());
   MOZ_ASSERT(thread);
   MozStackWalkThread(StackWalkCallback, /* skipFrames */ 0, maxFrames,
                      &aNativeStack, thread, /* context */ nullptr);
 }
 #endif
 
 #ifdef USE_EHABI_STACKWALK
 static void
-DoEHABIBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
+DoEHABIBacktrace(PSLockRef aLock, const RegisteredThread& aRegisteredThread,
                  const Registers& aRegs, NativeStack& aNativeStack)
 {
   // WARNING: this function runs within the profiler's "critical section".
   // WARNING: this function might be called while the profiler is inactive, and
   //          cannot rely on ActivePS.
 
   const mcontext_t* mcontext = &aRegs.mContext->uc_mcontext;
   mcontext_t savedContext;
-  PseudoStack& pseudoStack = aThreadInfo.RacyInfo()->PseudoStack();
+  const PseudoStack& pseudoStack =
+    aRegisteredThread.RacyRegisteredThread().PseudoStack();
 
   // The pseudostack contains an "EnterJIT" frame whenever we enter
-  // JIT code with profiling enabled; the stack pointer value points
-  // the saved registers.  We use this to unwind resume unwinding
-  // after encounting JIT code.
+  // JIT code with profiling enabled; the stack pointer value points to
+  // the saved registers.  We use this to resume unwinding after
+  // encountering JIT code.
   for (uint32_t i = pseudoStack.stackSize(); i > 0; --i) {
     // The pseudostack grows towards higher indices, so we iterate
     // backwards (from callee to caller).
-    js::ProfileEntry& entry = pseudoStack.entries[i - 1];
+    const js::ProfileEntry& entry = pseudoStack.entries[i - 1];
     if (!entry.isJs() && strcmp(entry.label(), "EnterJIT") == 0) {
       // Found JIT entry frame.  Unwind up to that point (i.e., force
       // the stack walk to stop before the block of saved registers;
       // note that it yields nondecreasing stack pointers), then restore
       // the saved state.
       uint32_t* vSP = reinterpret_cast<uint32_t*>(entry.stackAddress());
 
       aNativeStack.mCount +=
@@ -1050,17 +1135,17 @@ DoEHABIBacktrace(PSLockRef aLock, const 
       savedContext.arm_pc  = savedContext.arm_lr;
       mcontext = &savedContext;
     }
   }
 
   // Now unwind whatever's left (starting from either the last EnterJIT frame
   // or, if no EnterJIT was found, the original registers).
   aNativeStack.mCount +=
-    EHABIStackWalk(*mcontext, aThreadInfo.StackTop(),
+    EHABIStackWalk(*mcontext, const_cast<void*>(aRegisteredThread.StackTop()),
                    aNativeStack.mSPs + aNativeStack.mCount,
                    aNativeStack.mPCs + aNativeStack.mCount,
                    MAX_NATIVE_FRAMES - aNativeStack.mCount);
 }
 #endif
 
 #ifdef USE_LUL_STACKWALK
 
@@ -1078,17 +1163,17 @@ ASAN_memcpy(void* aDst, const void* aSrc
 
   for (size_t i = 0; i < aLen; i++) {
     dst[i] = src[i];
   }
 }
 #endif
 
 static void
-DoLULBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
+DoLULBacktrace(PSLockRef aLock, const RegisteredThread& aRegisteredThread,
                const Registers& aRegs, NativeStack& aNativeStack)
 {
   // WARNING: this function runs within the profiler's "critical section".
   // WARNING: this function might be called while the profiler is inactive, and
   //          cannot rely on ActivePS.
 
   const mcontext_t* mc = &aRegs.mContext->uc_mcontext;
 
@@ -1162,17 +1247,17 @@ DoLULBacktrace(PSLockRef aLock, const Th
     uintptr_t rEDZONE_SIZE = 0;
     uintptr_t start = startRegs.xsp.Value() - rEDZONE_SIZE;
 #elif defined(GP_PLAT_mips64_linux)
     uintptr_t rEDZONE_SIZE = 0;
     uintptr_t start = startRegs.sp.Value() - rEDZONE_SIZE;
 #else
 #   error "Unknown plat"
 #endif
-    uintptr_t end = reinterpret_cast<uintptr_t>(aThreadInfo.StackTop());
+    uintptr_t end = reinterpret_cast<uintptr_t>(aRegisteredThread.StackTop());
     uintptr_t ws  = sizeof(void*);
     start &= ~(ws-1);
     end   &= ~(ws-1);
     uintptr_t nToCopy = 0;
     if (start < end) {
       nToCopy = end - start;
       if (nToCopy > lul::N_STACK_BYTES)
         nToCopy = lul::N_STACK_BYTES;
@@ -1212,117 +1297,119 @@ DoLULBacktrace(PSLockRef aLock, const Th
   lul->mStats.mCFI     += aNativeStack.mCount - 1 - framePointerFramesAcquired;
   lul->mStats.mFP      += framePointerFramesAcquired;
 }
 
 #endif
 
 #ifdef HAVE_NATIVE_UNWIND
 static void
-DoNativeBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
+DoNativeBacktrace(PSLockRef aLock, const RegisteredThread& aRegisteredThread,
                   const Registers& aRegs, NativeStack& aNativeStack)
 {
   // This method determines which stackwalker is used for periodic and
   // synchronous samples. (Backtrace samples are treated differently, see
   // profiler_suspend_and_sample_thread() for details). The only part of the
   // ordering that matters is that LUL must precede FRAME_POINTER, because on
   // Linux they can both be present.
 #if defined(USE_LUL_STACKWALK)
-  DoLULBacktrace(aLock, aThreadInfo, aRegs, aNativeStack);
+  DoLULBacktrace(aLock, aRegisteredThread, aRegs, aNativeStack);
 #elif defined(USE_EHABI_STACKWALK)
-  DoEHABIBacktrace(aLock, aThreadInfo, aRegs, aNativeStack);
+  DoEHABIBacktrace(aLock, aRegisteredThread, aRegs, aNativeStack);
 #elif defined(USE_FRAME_POINTER_STACK_WALK)
-  DoFramePointerBacktrace(aLock, aThreadInfo, aRegs, aNativeStack);
+  DoFramePointerBacktrace(aLock, aRegisteredThread, aRegs, aNativeStack);
 #elif defined(USE_MOZ_STACK_WALK)
-  DoMozStackWalkBacktrace(aLock, aThreadInfo, aRegs, aNativeStack);
+  DoMozStackWalkBacktrace(aLock, aRegisteredThread, aRegs, aNativeStack);
 #else
   #error "Invalid configuration"
 #endif
 }
 #endif
 
 // Writes some components shared by periodic and synchronous profiles to
 // ActivePS's ProfileBuffer. (This should only be called from DoSyncSample()
 // and DoPeriodicSample().)
 //
 // The grammar for entry sequences is in a comment above
 // ProfileBuffer::StreamSamplesToJSON.
 static inline void
 DoSharedSample(PSLockRef aLock, bool aIsSynchronous,
-               ThreadInfo& aThreadInfo, const TimeStamp& aNow,
+               RegisteredThread& aRegisteredThread, const TimeStamp& aNow,
                const Registers& aRegs, Maybe<uint64_t>* aLastSample,
                ProfileBuffer& aBuffer)
 {
   // WARNING: this function runs within the profiler's "critical section".
 
   MOZ_RELEASE_ASSERT(ActivePS::Exists(aLock));
 
-  uint64_t samplePos = aBuffer.AddThreadIdEntry(aThreadInfo.ThreadId());
+  uint64_t samplePos = aBuffer.AddThreadIdEntry(aRegisteredThread.Info()->ThreadId());
   if (aLastSample) {
     *aLastSample = Some(samplePos);
   }
 
   TimeDuration delta = aNow - CorePS::ProcessStartTime();
   aBuffer.AddEntry(ProfileBufferEntry::Time(delta.ToMilliseconds()));
 
   ProfileBufferCollector collector(aBuffer, ActivePS::Features(aLock),
                                    samplePos);
   NativeStack nativeStack;
 #if defined(HAVE_NATIVE_UNWIND)
   if (ActivePS::FeatureStackWalk(aLock)) {
-    DoNativeBacktrace(aLock, aThreadInfo, aRegs, nativeStack);
-
-    MergeStacks(ActivePS::Features(aLock), aIsSynchronous, aThreadInfo, aRegs,
-                nativeStack, collector);
+    DoNativeBacktrace(aLock, aRegisteredThread, aRegs, nativeStack);
+
+    MergeStacks(ActivePS::Features(aLock), aIsSynchronous, aRegisteredThread,
+                aRegs, nativeStack, collector);
   } else
 #endif
   {
-    MergeStacks(ActivePS::Features(aLock), aIsSynchronous, aThreadInfo, aRegs,
-                nativeStack, collector);
+    MergeStacks(ActivePS::Features(aLock), aIsSynchronous, aRegisteredThread,
+                aRegs, nativeStack, collector);
 
     // We can't walk the whole native stack, but we can record the top frame.
     if (ActivePS::FeatureLeaf(aLock)) {
       aBuffer.AddEntry(ProfileBufferEntry::NativeLeafAddr((void*)aRegs.mPC));
     }
   }
 }
 
 // Writes the components of a synchronous sample to the given ProfileBuffer.
 static void
-DoSyncSample(PSLockRef aLock, ThreadInfo& aThreadInfo, const TimeStamp& aNow,
-             const Registers& aRegs, ProfileBuffer& aBuffer)
+DoSyncSample(PSLockRef aLock, RegisteredThread& aRegisteredThread,
+             const TimeStamp& aNow, const Registers& aRegs,
+             ProfileBuffer& aBuffer)
 {
   // WARNING: this function runs within the profiler's "critical section".
 
-  DoSharedSample(aLock, /* isSynchronous = */ true, aThreadInfo, aNow, aRegs,
-                 /* lastSample = */ nullptr, aBuffer);
+  DoSharedSample(aLock, /* aIsSynchronous = */ true, aRegisteredThread, aNow,
+                 aRegs, /* aLastSample = */ nullptr, aBuffer);
 }
 
 // Writes the components of a periodic sample to ActivePS's ProfileBuffer.
 static void
-DoPeriodicSample(PSLockRef aLock, ThreadInfo& aThreadInfo,
+DoPeriodicSample(PSLockRef aLock, RegisteredThread& aRegisteredThread,
+                 ProfiledThreadData& aProfiledThreadData,
                  const TimeStamp& aNow, const Registers& aRegs,
                  int64_t aRSSMemory, int64_t aUSSMemory)
 {
   // WARNING: this function runs within the profiler's "critical section".
 
   ProfileBuffer& buffer = ActivePS::Buffer(aLock);
 
-  DoSharedSample(aLock, /* isSynchronous = */ false, aThreadInfo, aNow, aRegs,
-                 &aThreadInfo.LastSample(), buffer);
+  DoSharedSample(aLock, /* aIsSynchronous = */ false, aRegisteredThread, aNow,
+                 aRegs, &aProfiledThreadData.LastSample(), buffer);
 
   ProfilerMarkerLinkedList* pendingMarkersList =
-    aThreadInfo.RacyInfo()->GetPendingMarkers();
+    aRegisteredThread.RacyRegisteredThread().GetPendingMarkers();
   while (pendingMarkersList && pendingMarkersList->peek()) {
     ProfilerMarker* marker = pendingMarkersList->popHead();
     buffer.AddStoredMarker(marker);
     buffer.AddEntry(ProfileBufferEntry::Marker(marker));
   }
 
-  ThreadResponsiveness* resp = aThreadInfo.GetThreadResponsiveness();
+  ThreadResponsiveness* resp = aProfiledThreadData.GetThreadResponsiveness();
   if (resp && resp->HasData()) {
     double delta = resp->GetUnresponsiveDuration(
       (aNow - CorePS::ProcessStartTime()).ToMilliseconds());
     buffer.AddEntry(ProfileBufferEntry::Responsiveness(delta));
   }
 
   if (aRSSMemory != 0) {
     double rssMemory = static_cast<double>(aRSSMemory);
@@ -1405,26 +1492,21 @@ StreamTaskTracer(PSLockRef aLock, Splice
     for (uint32_t i = 0; i < data->Length(); ++i) {
       aWriter.StringElement((data->ElementAt(i)).get());
     }
   }
   aWriter.EndArray();
 
   aWriter.StartArrayProperty("threads");
   {
-    const CorePS::ThreadVector& liveThreads = CorePS::LiveThreads(aLock);
-    for (size_t i = 0; i < liveThreads.size(); i++) {
-      ThreadInfo* info = liveThreads.at(i);
-      StreamNameAndThreadId(aWriter, info->Name(), info->ThreadId());
-    }
-
-    CorePS::DiscardExpiredDeadThreads(aLock, ActivePS::Buffer(aLock).mRangeStart);
-    const CorePS::ThreadVector& deadThreads = CorePS::DeadThreads(aLock);
-    for (size_t i = 0; i < deadThreads.size(); i++) {
-      ThreadInfo* info = deadThreads.at(i);
+    ActivePS::DiscardExpiredDeadProfiledThreads(aLock);
+    nsTArray<Pair<RegisteredThread*, ProfiledThreadData*>> threads =
+      ActivePS::ProfiledThreads(aLock);
+    for (auto& thread : threads) {
+      RefPtr<ThreadInfo> info = thread.second()->Info();
       StreamNameAndThreadId(aWriter, info->Name(), info->ThreadId());
     }
   }
   aWriter.EndArray();
 
   aWriter.DoubleProperty(
     "start", static_cast<double>(tasktracer::GetStartTime()));
 #endif
@@ -1648,31 +1730,26 @@ locked_profiler_stream_json_for_this_pro
     aWriter.StartObjectProperty("tasktracer");
     StreamTaskTracer(aLock, aWriter);
     aWriter.EndObject();
   }
 
   // Lists the samples for each thread profile
   aWriter.StartArrayProperty("threads");
   {
-    const CorePS::ThreadVector& liveThreads = CorePS::LiveThreads(aLock);
-    for (size_t i = 0; i < liveThreads.size(); i++) {
-      ThreadInfo* info = liveThreads.at(i);
-      if (!info->IsBeingProfiled()) {
-        continue;
-      }
-      info->StreamJSON(buffer, aWriter, CorePS::ProcessStartTime(), aSinceTime);
-    }
-
-    CorePS::DiscardExpiredDeadThreads(aLock, ActivePS::Buffer(aLock).mRangeStart);
-    const CorePS::ThreadVector& deadThreads = CorePS::DeadThreads(aLock);
-    for (size_t i = 0; i < deadThreads.size(); i++) {
-      ThreadInfo* info = deadThreads.at(i);
-      MOZ_ASSERT(info->IsBeingProfiled());
-      info->StreamJSON(buffer, aWriter, CorePS::ProcessStartTime(), aSinceTime);
+    ActivePS::DiscardExpiredDeadProfiledThreads(aLock);
+    nsTArray<Pair<RegisteredThread*, ProfiledThreadData*>> threads =
+      ActivePS::ProfiledThreads(aLock);
+    for (auto& thread : threads) {
+      RegisteredThread* registeredThread = thread.first();
+      JSContext* cx =
+        registeredThread ? registeredThread->GetJSContext() : nullptr;
+      ProfiledThreadData* profiledThreadData = thread.second();
+      profiledThreadData->StreamJSON(buffer, cx, aWriter,
+                                     CorePS::ProcessStartTime(), aSinceTime);
     }
 
 #if defined(GP_OS_android)
     if (ActivePS::FeatureJava(aLock)) {
       java::GeckoJavaSampler::Pause();
 
       aWriter.Start();
       {
@@ -1824,17 +1901,17 @@ public:
 
   // This method suspends and resumes the samplee thread. It calls the passed-in
   // function-like object aProcessRegs (passing it a populated |const
   // Registers&| arg) while the samplee thread is suspended.
   //
   // Func must be a function-like object of type `void()`.
   template<typename Func>
   void SuspendAndSampleAndResumeThread(PSLockRef aLock,
-                                       const ThreadInfo& aThreadInfo,
+                                       const RegisteredThread& aRegisteredThread,
                                        const Func& aProcessRegs);
 
 private:
 #if defined(GP_OS_linux) || defined(GP_OS_android)
   // Used to restore the SIGPROF handler when ours is removed.
   struct sigaction mOldSigprofHandler;
 
   // This process' ID. Needed as an argument for tgkill in
@@ -1934,54 +2011,54 @@ SamplerThread::Run()
       // happens the generation won't match.
       if (ActivePS::Generation(lock) != mActivityGeneration) {
         return;
       }
 
       ActivePS::Buffer(lock).DeleteExpiredStoredMarkers();
 
       if (!ActivePS::IsPaused(lock)) {
-        const CorePS::ThreadVector& liveThreads = CorePS::LiveThreads(lock);
-        for (uint32_t i = 0; i < liveThreads.size(); i++) {
-          ThreadInfo* info = liveThreads.at(i);
-
-          if (!info->IsBeingProfiled()) {
-            // We are not interested in profiling this thread.
-            continue;
-          }
+        const nsTArray<LiveProfiledThreadData>& liveThreads =
+          ActivePS::LiveProfiledThreads(lock);
+
+        int64_t rssMemory = 0;
+        int64_t ussMemory = 0;
+        if (!liveThreads.IsEmpty() && ActivePS::FeatureMemory(lock)) {
+          rssMemory = nsMemoryReporterManager::ResidentFast();
+#if defined(GP_OS_linux) || defined(GP_OS_android)
+          ussMemory = nsMemoryReporterManager::ResidentUnique();
+#endif
+        }
+
+        for (auto& thread : liveThreads) {
+          RegisteredThread* registeredThread = thread.mRegisteredThread;
+          ProfiledThreadData* profiledThreadData =
+            thread.mProfiledThreadData.get();
+          RefPtr<ThreadInfo> info = registeredThread->Info();
 
           // If the thread is asleep and has been sampled before in the same
           // sleep episode, find and copy the previous sample, as that's
           // cheaper than taking a new sample.
-          if (info->RacyInfo()->CanDuplicateLastSampleDueToSleep()) {
+          if (registeredThread->RacyRegisteredThread().CanDuplicateLastSampleDueToSleep()) {
             bool dup_ok =
               ActivePS::Buffer(lock).DuplicateLastSample(
                 info->ThreadId(), CorePS::ProcessStartTime(),
-                info->LastSample());
+                profiledThreadData->LastSample());
             if (dup_ok) {
               continue;
             }
           }
 
-          info->GetThreadResponsiveness()->Update();
-
-          // We only get the memory measurements once for all live threads.
-          int64_t rssMemory = 0;
-          int64_t ussMemory = 0;
-          if (i == 0 && ActivePS::FeatureMemory(lock)) {
-            rssMemory = nsMemoryReporterManager::ResidentFast();
-#if defined(GP_OS_linux) || defined(GP_OS_android)
-            ussMemory = nsMemoryReporterManager::ResidentUnique();
-#endif
-          }
+          profiledThreadData->GetThreadResponsiveness()->Update();
 
           TimeStamp now = TimeStamp::Now();
-          SuspendAndSampleAndResumeThread(lock, *info,
+          SuspendAndSampleAndResumeThread(lock, *registeredThread,
                                           [&](const Registers& aRegs) {
-            DoPeriodicSample(lock, *info, now, aRegs, rssMemory, ussMemory);
+            DoPeriodicSample(lock, *registeredThread, *profiledThreadData, now,
+                             aRegs, rssMemory, ussMemory);
           });
         }
 
 #if defined(USE_LUL_STACKWALK)
         // The LUL unwind object accumulates frame statistics. Periodically we
         // should poke it to give it a chance to print those statistics.  This
         // involves doing I/O (fprintf, __android_log_print, etc.) and so
         // can't safely be done from the critical section inside
@@ -2102,69 +2179,69 @@ ParseFeaturesFromStringArray(const char*
   uint32_t features = 0;
   PROFILER_FOR_EACH_FEATURE(ADD_FEATURE_BIT)
 
   #undef ADD_FEATURE_BIT
 
   return features;
 }
 
-// Find the ThreadInfo for the current thread. This should only be called in
-// places where TLSInfo can't be used. On success, *aIndexOut is set to the
-// index if it is non-null.
-static ThreadInfo*
-FindLiveThreadInfo(PSLockRef aLock, int* aIndexOut = nullptr)
+// Find the RegisteredThread for the current thread. This should only be called
+// in places where TLSRegisteredThread can't be used.
+static RegisteredThread*
+FindCurrentThreadRegisteredThread(PSLockRef aLock)
 {
-  ThreadInfo* ret = nullptr;
   int id = Thread::GetCurrentId();
-  const CorePS::ThreadVector& liveThreads = CorePS::LiveThreads(aLock);
-  for (uint32_t i = 0; i < liveThreads.size(); i++) {
-    ThreadInfo* info = liveThreads.at(i);
-    if (info->ThreadId() == id) {
-      if (aIndexOut) {
-        *aIndexOut = i;
-      }
-      ret = info;
-      break;
+  const nsTArray<UniquePtr<RegisteredThread>>& registeredThreads =
+    CorePS::RegisteredThreads(aLock);
+  for (auto& registeredThread : registeredThreads) {
+    if (registeredThread->Info()->ThreadId() == id) {
+      return registeredThread.get();
     }
   }
 
-  return ret;
+  return nullptr;
 }
 
 static void
 locked_register_thread(PSLockRef aLock, const char* aName, void* aStackTop)
 {
   MOZ_RELEASE_ASSERT(CorePS::Exists());
 
-  MOZ_RELEASE_ASSERT(!FindLiveThreadInfo(aLock));
+  MOZ_RELEASE_ASSERT(!FindCurrentThreadRegisteredThread(aLock));
 
   VTUNE_REGISTER_THREAD(aName);
 
-  if (!TLSInfo::Init(aLock)) {
+  if (!TLSRegisteredThread::Init(aLock)) {
     return;
   }
 
-  ThreadInfo* info = new ThreadInfo(aName, Thread::GetCurrentId(),
-                                    NS_IsMainThread(),
-                                    NS_GetCurrentThreadNoCreate(),
-                                    aStackTop);
-  TLSInfo::SetInfo(aLock, info);
-
-  if (ActivePS::Exists(aLock) && ActivePS::ShouldProfileThread(aLock, info)) {
-    info->StartProfiling();
+  RefPtr<ThreadInfo> info =
+    new ThreadInfo(aName, Thread::GetCurrentId(), NS_IsMainThread());
+  UniquePtr<RegisteredThread> registeredThread =
+    MakeUnique<RegisteredThread>(info, NS_GetCurrentThreadNoCreate(),
+                                 aStackTop);
+
+  TLSRegisteredThread::SetRegisteredThread(aLock, registeredThread.get());
+
+  if (ActivePS::Exists(aLock) &&
+      ActivePS::ShouldProfileThread(aLock, info)) {
+    nsCOMPtr<nsIEventTarget> eventTarget = registeredThread->GetEventTarget();
+    ActivePS::AddLiveProfiledThread(aLock, registeredThread.get(),
+      MakeUnique<ProfiledThreadData>(info, eventTarget));
+
     if (ActivePS::FeatureJS(aLock)) {
       // This StartJSSampling() call is on-thread, so we can poll manually to
       // start JS sampling immediately.
-      info->StartJSSampling();
-      info->PollJSSampling();
+      registeredThread->StartJSSampling();
+      registeredThread->PollJSSampling();
     }
   }
 
-  CorePS::LiveThreads(aLock).push_back(info);
+  CorePS::AppendRegisteredThread(aLock, Move(registeredThread));
 }
 
 static void
 NotifyObservers(const char* aTopic, nsISupports* aSubject = nullptr)
 {
   if (!NS_IsMainThread()) {
     // Dispatch a task to the main thread that notifies observers.
     // If NotifyObservers is called both on and off the main thread within a
@@ -2426,18 +2503,18 @@ profiler_shutdown()
       }
 
       samplerThread = locked_profiler_stop(lock);
     }
 
     CorePS::Destroy(lock);
 
-    // We just destroyed CorePS and the ThreadInfos it contains, so we can
-    // clear this thread's TLSInfo.
-    TLSInfo::SetInfo(lock, nullptr);
+    // We just destroyed CorePS and the RegisteredThreads it contains, so we
+    // can clear this thread's TLSRegisteredThread.
+    TLSRegisteredThread::SetRegisteredThread(lock, nullptr);
 
 #ifdef MOZ_TASK_TRACER
     tasktracer::ShutdownTaskTracer();
 #endif
   }
 
   // We do these operations with gPSMutex unlocked. The comments in
   // profiler_stop() explain why.
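
The registration and shutdown hunks above follow one ownership pattern: CorePS owns every RegisteredThread through a UniquePtr in its registered-threads array, while TLSRegisteredThread holds a raw per-thread pointer so the owning thread can reach its own entry quickly; unregistering a thread (or tearing down CorePS) has to null the TLS pointer before the owned object is destroyed. The following is a minimal standalone sketch of that split using only the standard library; FakeRegisteredThread, gRegisteredThreads and the other names are invented for illustration and are not the Gecko API.

#include <cstdio>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

// Illustrative stand-ins for RegisteredThread and the CorePS-owned list.
struct FakeRegisteredThread {
  explicit FakeRegisteredThread(std::string aName) : mName(std::move(aName)) {}
  std::string mName;
};

static std::mutex gLock;  // plays the role of gPSMutex
static std::vector<std::unique_ptr<FakeRegisteredThread>> gRegisteredThreads;
static thread_local FakeRegisteredThread* tlsRegisteredThread = nullptr;

void RegisterCurrentThread(const char* aName) {
  std::lock_guard<std::mutex> lock(gLock);
  auto owned = std::make_unique<FakeRegisteredThread>(aName);
  tlsRegisteredThread = owned.get();               // fast per-thread access
  gRegisteredThreads.push_back(std::move(owned));  // ownership lives here
}

void UnregisterCurrentThread() {
  std::lock_guard<std::mutex> lock(gLock);
  FakeRegisteredThread* current = tlsRegisteredThread;
  if (!current) {
    return;  // never registered, or already unregistered
  }
  // Clear the TLS pointer *before* destroying the object it points at.
  tlsRegisteredThread = nullptr;
  for (auto it = gRegisteredThreads.begin(); it != gRegisteredThreads.end();
       ++it) {
    if (it->get() == current) {
      gRegisteredThreads.erase(it);  // deletes the FakeRegisteredThread
      break;
    }
  }
}

int main() {
  RegisterCurrentThread("IllustrativeThread");
  std::printf("registered: %s\n", tlsRegisteredThread->mName.c_str());
  UnregisterCurrentThread();
  std::printf("TLS pointer is now %s\n", tlsRegisteredThread ? "set" : "null");
  return 0;
}

The real code does the equivalent under gPSMutex, with CorePS::AppendRegisteredThread() and CorePS::RemoveRegisteredThread() in place of the vector operations above.
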
@@ -2662,22 +2739,23 @@ profiler_get_buffer_info()
 
 static void
 PollJSSamplingForCurrentThread()
 {
   MOZ_RELEASE_ASSERT(CorePS::Exists());
 
   PSAutoLock lock(gPSMutex);
 
-  ThreadInfo* info = TLSInfo::Info(lock);
-  if (!info) {
+  RegisteredThread* registeredThread =
+    TLSRegisteredThread::RegisteredThread(lock);
+  if (!registeredThread) {
     return;
   }
 
-  info->PollJSSampling();
+  registeredThread->PollJSSampling();
 }
 
 // When the profiler is started on a background thread, we can't synchronously
 // call PollJSSampling on the main thread's ThreadInfo. And the next regular
 // call to PollJSSampling on the main thread would only happen once the main
 // thread triggers a JS interrupt callback.
 // This means that all the JS execution between profiler_start() and the first
 // JS interrupt would happen with JS sampling disabled, and we wouldn't get any
@@ -2733,43 +2811,42 @@ locked_profiler_start(PSLockRef aLock, u
   // Fall back to the default values if the passed-in values are unreasonable.
   uint32_t entries = aEntries > 0 ? aEntries : PROFILER_DEFAULT_ENTRIES;
   double interval = aInterval > 0 ? aInterval : PROFILER_DEFAULT_INTERVAL;
 
   ActivePS::Create(aLock, entries, interval, aFeatures, aFilters, aFilterCount);
 
   // Set up profiling for each registered thread, if appropriate.
   int tid = Thread::GetCurrentId();
-  const CorePS::ThreadVector& liveThreads = CorePS::LiveThreads(aLock);
-  for (uint32_t i = 0; i < liveThreads.size(); i++) {
-    ThreadInfo* info = liveThreads.at(i);
+  const nsTArray<UniquePtr<RegisteredThread>>& registeredThreads =
+    CorePS::RegisteredThreads(aLock);
+  for (auto& registeredThread : registeredThreads) {
+    RefPtr<ThreadInfo> info = registeredThread->Info();
 
     if (ActivePS::ShouldProfileThread(aLock, info)) {
-      info->StartProfiling();
+      nsCOMPtr<nsIEventTarget> eventTarget = registeredThread->GetEventTarget();
+      ActivePS::AddLiveProfiledThread(aLock, registeredThread.get(),
+        MakeUnique<ProfiledThreadData>(info, eventTarget));
       if (ActivePS::FeatureJS(aLock)) {
-        info->StartJSSampling();
+        registeredThread->StartJSSampling();
         if (info->ThreadId() == tid) {
           // We can manually poll the current thread so it starts sampling
           // immediately.
-          info->PollJSSampling();
+          registeredThread->PollJSSampling();
         } else if (info->IsMainThread()) {
           // Dispatch a runnable to the main thread to call PollJSSampling(),
           // so that we don't have to wait for the next JS interrupt callback
           // in order to start profiling JS.
           TriggerPollJSSamplingOnMainThread();
         }
       }
+      registeredThread->RacyRegisteredThread().ReinitializeOnResume();
     }
   }
 
-  // Dead ThreadInfos are deleted in profiler_stop(), and dead ThreadInfos
-  // aren't saved when the profiler is inactive. Therefore the dead threads
-  // vector should be empty here.
-  MOZ_RELEASE_ASSERT(CorePS::DeadThreads(aLock).empty());
-
 #ifdef MOZ_TASK_TRACER
   if (ActivePS::FeatureTaskTracer(aLock)) {
     tasktracer::StartLogging();
   }
 #endif
 
 #if defined(GP_OS_android)
   if (ActivePS::FeatureJava(aLock)) {
@@ -2882,39 +2959,30 @@ locked_profiler_stop(PSLockRef aLock)
 #ifdef MOZ_TASK_TRACER
   if (ActivePS::FeatureTaskTracer(aLock)) {
     tasktracer::StopLogging();
   }
 #endif
 
   // Stop sampling live threads.
   int tid = Thread::GetCurrentId();
-  CorePS::ThreadVector& liveThreads = CorePS::LiveThreads(aLock);
-  for (uint32_t i = 0; i < liveThreads.size(); i++) {
-    ThreadInfo* info = liveThreads.at(i);
-    if (info->IsBeingProfiled()) {
-      if (ActivePS::FeatureJS(aLock)) {
-        info->StopJSSampling();
-        if (info->ThreadId() == tid) {
-          // We can manually poll the current thread so it stops profiling
-          // immediately.
-          info->PollJSSampling();
-        }
+  const nsTArray<LiveProfiledThreadData>& liveProfiledThreads =
+    ActivePS::LiveProfiledThreads(aLock);
+  for (auto& thread : liveProfiledThreads) {
+    RegisteredThread* registeredThread = thread.mRegisteredThread;
+    if (ActivePS::FeatureJS(aLock)) {
+      registeredThread->StopJSSampling();
+      if (registeredThread->Info()->ThreadId() == tid) {
+        // We can manually poll the current thread so it stops profiling
+        // immediately.
+        registeredThread->PollJSSampling();
       }
-      info->StopProfiling();
     }
   }
 
-  // This is where we destroy the ThreadInfos for all dead threads.
-  CorePS::ThreadVector& deadThreads = CorePS::DeadThreads(aLock);
-  while (!deadThreads.empty()) {
-    delete deadThreads.back();
-    deadThreads.pop_back();
-  }
-
   // The Stop() call doesn't actually stop Run(); that happens in this
   // function's caller when the sampler thread is destroyed. Stop() just gives
   // the SamplerThread a chance to do some cleanup with gPSMutex locked.
   SamplerThread* samplerThread = ActivePS::Destroy(aLock);
   samplerThread->Stop(aLock);
 
   return samplerThread;
 }
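
locked_profiler_start() and locked_profiler_stop() above illustrate the new lifetime split: a RegisteredThread exists for as long as its thread stays registered, while a ProfiledThreadData is created only when an active session decides to profile that thread and is torn down with the session; ActivePS pairs a non-owning RegisteredThread pointer with an owning ProfiledThreadData in its live-profiled-threads list. Below is a rough standalone sketch of that pairing; FakeActiveSession, LivePair and the rest are invented names, not the real classes.

#include <cstdio>
#include <memory>
#include <string>
#include <vector>

struct FakeRegisteredThread {    // lives while the thread is registered
  std::string mName;
};

struct FakeProfiledThreadData {  // lives only while a session profiles it
  explicit FakeProfiledThreadData(const FakeRegisteredThread* aThread)
    : mThread(aThread) {}
  const FakeRegisteredThread* mThread;
  std::vector<double> mSampleTimes;  // stand-in for buffered profile data
};

struct LivePair {
  FakeRegisteredThread* mRegisteredThread;                      // non-owning
  std::unique_ptr<FakeProfiledThreadData> mProfiledThreadData;  // owning
};

struct FakeActiveSession {
  std::vector<LivePair> mLiveProfiledThreads;

  void Start(std::vector<std::unique_ptr<FakeRegisteredThread>>& aRegistered) {
    for (auto& thread : aRegistered) {
      mLiveProfiledThreads.push_back(
        { thread.get(),
          std::make_unique<FakeProfiledThreadData>(thread.get()) });
    }
  }

  void Stop() {
    // Dropping the pairs destroys every FakeProfiledThreadData; the
    // registered threads, owned elsewhere, are untouched.
    mLiveProfiledThreads.clear();
  }
};

int main() {
  std::vector<std::unique_ptr<FakeRegisteredThread>> registered;
  registered.push_back(std::make_unique<FakeRegisteredThread>());
  registered.back()->mName = "IllustrativeThread";

  FakeActiveSession session;
  session.Start(registered);
  std::printf("profiling %zu thread(s)\n", session.mLiveProfiledThreads.size());
  session.Stop();
  std::printf("after stop, %zu thread(s) still registered\n", registered.size());
  return 0;
}
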
@@ -3045,92 +3113,94 @@ profiler_register_thread(const char* aNa
 void
 profiler_unregister_thread()
 {
   MOZ_ASSERT_IF(NS_IsMainThread(), Scheduler::IsCooperativeThread());
   MOZ_RELEASE_ASSERT(CorePS::Exists());
 
   PSAutoLock lock(gPSMutex);
 
-  // We don't call ThreadInfo::StopJSSampling() here; there's no point doing
-  // that for a JS thread that is in the process of disappearing.
-
-  int i;
-  ThreadInfo* info = FindLiveThreadInfo(lock, &i);
-  MOZ_RELEASE_ASSERT(info == TLSInfo::Info(lock));
-  if (info) {
+  // We don't call RegisteredThread::StopJSSampling() here; there's no point
+  // doing that for a JS thread that is in the process of disappearing.
+
+  RegisteredThread* registeredThread = FindCurrentThreadRegisteredThread(lock);
+  MOZ_RELEASE_ASSERT(registeredThread ==
+                     TLSRegisteredThread::RegisteredThread(lock));
+  if (registeredThread) {
+    RefPtr<ThreadInfo> info = registeredThread->Info();
+
     DEBUG_LOG("profiler_unregister_thread: %s", info->Name());
-    if (ActivePS::Exists(lock) && info->IsBeingProfiled()) {
-      info->NotifyUnregistered(ActivePS::Buffer(lock).mRangeEnd);
-      CorePS::DeadThreads(lock).push_back(info);
-      CorePS::DiscardExpiredDeadThreads(lock, ActivePS::Buffer(lock).mRangeStart);
-    } else {
-      delete info;
+
+    if (ActivePS::Exists(lock)) {
+      ActivePS::UnregisterThread(lock, registeredThread);
     }
-    CorePS::ThreadVector& liveThreads = CorePS::LiveThreads(lock);
-    liveThreads.erase(liveThreads.begin() + i);
-
-    // Whether or not we just destroyed the ThreadInfo or transferred it to the
-    // dead thread vector, we no longer need to access it via TLS.
-    TLSInfo::SetInfo(lock, nullptr);
-
+
+    // Clear the pointer to the RegisteredThread object that we're about to
+    // destroy.
+    TLSRegisteredThread::SetRegisteredThread(lock, nullptr);
+
+    // Remove the thread from the list of registered threads. This deletes the
+    // registeredThread object.
+    CorePS::RemoveRegisteredThread(lock, registeredThread);
   } else {
-    // There are two ways FindLiveThreadInfo() might have failed.
+    // There are two ways FindCurrentThreadRegisteredThread() might have failed.
     //
-    // - TLSInfo::Init() failed in locked_register_thread().
+    // - TLSRegisteredThread::Init() failed in locked_register_thread().
     //
     // - We've already called profiler_unregister_thread() for this thread.
     //   (Whether or not it should, this does happen in practice.)
     //
-    // Either way, TLSInfo should be empty.
-    MOZ_RELEASE_ASSERT(!TLSInfo::Info(lock));
+    // Either way, TLSRegisteredThread should be empty.
+    MOZ_RELEASE_ASSERT(!TLSRegisteredThread::RegisteredThread(lock));
   }
 }
 
 void
 profiler_thread_sleep()
 {
   // This function runs both on and off the main thread.
 
   MOZ_RELEASE_ASSERT(CorePS::Exists());
 
-  RacyThreadInfo* racyInfo = TLSInfo::RacyInfo();
-  if (!racyInfo) {
+  RacyRegisteredThread* racyRegisteredThread =
+    TLSRegisteredThread::RacyRegisteredThread();
+  if (!racyRegisteredThread) {
     return;
   }
 
-  racyInfo->SetSleeping();
+  racyRegisteredThread->SetSleeping();
 }
 
 void
 profiler_thread_wake()
 {
   // This function runs both on and off the main thread.
 
   MOZ_RELEASE_ASSERT(CorePS::Exists());
 
-  RacyThreadInfo* racyInfo = TLSInfo::RacyInfo();
-  if (!racyInfo) {
+  RacyRegisteredThread* racyRegisteredThread =
+    TLSRegisteredThread::RacyRegisteredThread();
+  if (!racyRegisteredThread) {
     return;
   }
 
-  racyInfo->SetAwake();
+  racyRegisteredThread->SetAwake();
 }
 
 bool
 profiler_thread_is_sleeping()
 {
   MOZ_RELEASE_ASSERT(NS_IsMainThread());
   MOZ_RELEASE_ASSERT(CorePS::Exists());
 
-  RacyThreadInfo* racyInfo = TLSInfo::RacyInfo();
-  if (!racyInfo) {
+  RacyRegisteredThread* racyRegisteredThread =
+    TLSRegisteredThread::RacyRegisteredThread();
+  if (!racyRegisteredThread) {
     return false;
   }
-  return racyInfo->IsSleeping();
+  return racyRegisteredThread->IsSleeping();
 }
 
 void
 profiler_js_interrupt_callback()
 {
   // This function runs on JS threads being sampled.
   PollJSSamplingForCurrentThread();
 }
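
profiler_thread_sleep(), profiler_thread_wake() and profiler_thread_is_sleeping() now go through RacyRegisteredThread, the lock-free slice of the per-thread state: the sleeping flag is written by the owning thread and read by the sampler without taking gPSMutex, so it has to be an atomic. A minimal sketch of that shape follows; the real class also tracks whether a sleeping thread has already been sampled, which is omitted here, and the names are invented.

#include <atomic>
#include <cstdio>

// Stand-in for RacyRegisteredThread's sleep tracking: written by the owning
// thread, read by the sampler thread with no lock held.
class FakeRacyRegisteredThread {
public:
  void SetSleeping() { mSleeping.store(true, std::memory_order_relaxed); }
  void SetAwake() { mSleeping.store(false, std::memory_order_relaxed); }
  bool IsSleeping() const { return mSleeping.load(std::memory_order_relaxed); }

private:
  std::atomic<bool> mSleeping{false};
};

int main() {
  FakeRacyRegisteredThread racy;
  racy.SetSleeping();
  std::printf("sleeping? %d\n", racy.IsSleeping() ? 1 : 0);  // 1
  racy.SetAwake();
  std::printf("sleeping? %d\n", racy.IsSleeping() ? 1 : 0);  // 0
  return 0;
}
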
@@ -3150,19 +3220,20 @@ profiler_get_backtrace()
   MOZ_RELEASE_ASSERT(CorePS::Exists());
 
   PSAutoLock lock(gPSMutex);
 
   if (!ActivePS::Exists(lock) || ActivePS::FeaturePrivacy(lock)) {
     return nullptr;
   }
 
-  ThreadInfo* info = TLSInfo::Info(lock);
-  if (!info) {
-    MOZ_ASSERT(info);
+  RegisteredThread* registeredThread =
+    TLSRegisteredThread::RegisteredThread(lock);
+  if (!registeredThread) {
+    MOZ_ASSERT(registeredThread);
     return nullptr;
   }
 
   int tid = Thread::GetCurrentId();
 
   TimeStamp now = TimeStamp::Now();
 
   Registers regs;
@@ -3170,17 +3241,17 @@ profiler_get_backtrace()
   regs.SyncPopulate();
 #else
   regs.Clear();
 #endif
 
   // 1000 should be plenty for a single backtrace.
   auto buffer = MakeUnique<ProfileBuffer>(1000);
 
-  DoSyncSample(lock, *info, now, regs, *buffer.get());
+  DoSyncSample(lock, *registeredThread, now, regs, *buffer.get());
 
   return UniqueProfilerBacktrace(
     new ProfilerBacktrace("SyncProfile", tid, Move(buffer)));
 }
 
 void
 ProfilerBacktraceDestructor::operator()(ProfilerBacktrace* aBacktrace)
 {
@@ -3195,27 +3266,28 @@ racy_profiler_add_marker(const char* aMa
 
   // We don't assert that RacyFeatures::IsActiveWithoutPrivacy() is true here,
   // because it's possible that the result has changed since we tested it in
   // the caller.
   //
   // Because of this imprecision it's possible to miss a marker or record one
   // we shouldn't. Either way is not a big deal.
 
-  RacyThreadInfo* racyInfo = TLSInfo::RacyInfo();
-  if (!racyInfo) {
+  RacyRegisteredThread* racyRegisteredThread =
+    TLSRegisteredThread::RacyRegisteredThread();
+  if (!racyRegisteredThread) {
     return;
   }
 
   TimeStamp origin = (aPayload && !aPayload->GetStartTime().IsNull())
-                   ? aPayload->GetStartTime()
-                   : TimeStamp::Now();
+                       ? aPayload->GetStartTime()
+                       : TimeStamp::Now();
   TimeDuration delta = origin - CorePS::ProcessStartTime();
-  racyInfo->AddPendingMarker(aMarkerName, Move(aPayload),
-                             delta.ToMilliseconds());
+  racyRegisteredThread->AddPendingMarker(aMarkerName, Move(aPayload),
+                                         delta.ToMilliseconds());
 }
 
 void
 profiler_add_marker(const char* aMarkerName,
                     UniquePtr<ProfilerMarkerPayload> aPayload)
 {
   MOZ_RELEASE_ASSERT(CorePS::Exists());
 
@@ -3251,19 +3323,20 @@ profiler_add_marker_for_thread(int aThre
     new ProfilerMarker(aMarkerName, aThreadId, Move(aPayload),
                        delta.ToMilliseconds());
 
   PSAutoLock lock(gPSMutex);
 
 #ifdef DEBUG
   // Assert that our thread ID makes sense
   bool realThread = false;
-  const CorePS::ThreadVector& liveThreads = CorePS::LiveThreads(lock);
-  for (uint32_t i = 0; i < liveThreads.size(); i++) {
-    ThreadInfo* info = liveThreads.at(i);
+  const nsTArray<UniquePtr<RegisteredThread>>& registeredThreads =
+    CorePS::RegisteredThreads(lock);
+  for (auto& thread : registeredThreads) {
+    RefPtr<ThreadInfo> info = thread->Info();
     if (info->ThreadId() == aThreadId) {
       realThread = true;
       break;
     }
   }
   MOZ_ASSERT(realThread, "Invalid thread id");
 #endif
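
racy_profiler_add_marker() above keeps the existing timing scheme: a marker is stamped with milliseconds relative to the process start time, using the payload's own start time when it recorded one and the current time otherwise. In standard-library terms the computation is roughly the following; MarkerTimeMs and everything else here is illustrative, not Gecko code.

#include <chrono>
#include <cstdio>
#include <optional>

using Clock = std::chrono::steady_clock;

// Compute a marker time in ms relative to process start, preferring the
// payload's own start timestamp when one was recorded.
double MarkerTimeMs(Clock::time_point aProcessStart,
                    std::optional<Clock::time_point> aPayloadStart) {
  Clock::time_point origin = aPayloadStart ? *aPayloadStart : Clock::now();
  std::chrono::duration<double, std::milli> delta = origin - aProcessStart;
  return delta.count();
}

int main() {
  Clock::time_point processStart = Clock::now();
  // A marker with no payload start time gets stamped "now".
  std::printf("marker at %.3f ms\n", MarkerTimeMs(processStart, std::nullopt));
  return 0;
}
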
 
@@ -3306,77 +3379,87 @@ profiler_tracing(const char* aCategory, 
   auto payload =
     MakeUnique<TracingMarkerPayload>(aCategory, aKind, Move(aCause));
   racy_profiler_add_marker(aMarkerName, Move(payload));
 }
 
 PseudoStack*
 profiler_get_pseudo_stack()
 {
-  return TLSInfo::Stack();
+  return TLSRegisteredThread::Stack();
 }
 
 void
 profiler_set_js_context(JSContext* aCx)
 {
   MOZ_ASSERT(aCx);
 
   PSAutoLock lock(gPSMutex);
 
-  ThreadInfo* info = TLSInfo::Info(lock);
-  if (!info) {
+  RegisteredThread* registeredThread =
+    TLSRegisteredThread::RegisteredThread(lock);
+  if (!registeredThread) {
     return;
   }
 
-  info->SetJSContext(aCx);
+  registeredThread->SetJSContext(aCx);
 
   // This call is on-thread, so we can call PollJSSampling() to start JS
   // sampling immediately.
-  info->PollJSSampling();
+  registeredThread->PollJSSampling();
 }
 
 void
 profiler_clear_js_context()
 {
   MOZ_RELEASE_ASSERT(CorePS::Exists());
 
   PSAutoLock lock(gPSMutex);
 
-  ThreadInfo* info = TLSInfo::Info(lock);
-  if (!info || !info->mContext) {
+  RegisteredThread* registeredThread =
+    TLSRegisteredThread::RegisteredThread(lock);
+  if (!registeredThread) {
+    return;
+  }
+
+  JSContext* cx = registeredThread->GetJSContext();
+  if (!cx) {
     return;
   }
 
   // On JS shut down, flush the current buffer as stringifying JIT samples
   // requires a live JSContext.
 
   if (ActivePS::Exists(lock)) {
-    // Flush this thread's ThreadInfo, if it is being profiled.
-    if (info->IsBeingProfiled()) {
-      info->FlushSamplesAndMarkers(CorePS::ProcessStartTime(),
-                                   ActivePS::Buffer(lock));
+    // Flush this thread's profile data, if it is being profiled.
+    ProfiledThreadData* profiledThreadData =
+      ActivePS::GetProfiledThreadData(lock, registeredThread);
+    if (profiledThreadData) {
+      profiledThreadData->FlushSamplesAndMarkers(cx,
+                                                 CorePS::ProcessStartTime(),
+                                                 ActivePS::Buffer(lock));
 
       if (ActivePS::FeatureJS(lock)) {
         // Notify the JS context that profiling for this context has stopped.
         // Do this by calling StopJSSampling and PollJSSampling before
         // nulling out the JSContext.
-        info->StopJSSampling();
-        info->PollJSSampling();
-
-        info->mContext = nullptr;
-
-        // Tell the ThreadInfo that we'd like to have JS sampling on this
+        registeredThread->StopJSSampling();
+        registeredThread->PollJSSampling();
+
+        registeredThread->ClearJSContext();
+
+        // Tell the thread that we'd like to have JS sampling on this
         // thread again, once it gets a new JSContext (if ever).
-        info->StartJSSampling();
+        registeredThread->StartJSSampling();
         return;
       }
     }
   }
 
-  info->mContext = nullptr;
+  registeredThread->ClearJSContext();
 }
 
 int
 profiler_current_thread_id()
 {
   return Thread::GetCurrentId();
 }
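
profiler_clear_js_context() above is ordering-sensitive: JIT samples can only be stringified while the JSContext is alive, so the profiled data has to be flushed first, JS sampling has to be stopped and polled while the context is still set, and only then may the context pointer be cleared (with sampling re-armed for any future context). A toy sketch of that sequence, with the JS and buffer machinery reduced to prints and every name invented:

#include <cstdio>

struct FakeJSContext { const char* mTag; };

class FakeRegisteredThread {
public:
  void SetJSContext(FakeJSContext* aCx) { mCx = aCx; }
  FakeJSContext* GetJSContext() const { return mCx; }
  void ClearJSContext() { mCx = nullptr; }
  void StartJSSampling() { std::printf("  arm JS sampling\n"); }
  void StopJSSampling() { std::printf("  stop JS sampling\n"); }
  void PollJSSampling() {
    std::printf("  poll JS sampling (cx=%s)\n", mCx ? mCx->mTag : "null");
  }

private:
  FakeJSContext* mCx = nullptr;
};

void FlushSamplesAndMarkers(FakeJSContext* aCx) {
  // Stringifying JIT frames needs a live context, so this must run first.
  std::printf("  flush samples using cx=%s\n", aCx->mTag);
}

void ClearJSContextInOrder(FakeRegisteredThread& aThread) {
  FakeJSContext* cx = aThread.GetJSContext();
  if (!cx) {
    return;
  }
  FlushSamplesAndMarkers(cx);   // 1. flush while the context is alive
  aThread.StopJSSampling();     // 2. tell the context profiling stopped...
  aThread.PollJSSampling();     //    ...and apply it while cx is still set
  aThread.ClearJSContext();     // 3. only now drop the pointer
  aThread.StartJSSampling();    // 4. re-arm for a future context, if any
}

int main() {
  FakeJSContext cx{"worker-cx"};
  FakeRegisteredThread thread;
  thread.SetJSContext(&cx);
  ClearJSContextInOrder(thread);
  return 0;
}
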
 
@@ -3387,55 +3470,57 @@ void
 profiler_suspend_and_sample_thread(int aThreadId,
                                    uint32_t aFeatures,
                                    ProfilerStackCollector& aCollector,
                                    bool aSampleNative /* = true */)
 {
   // Lock the profiler mutex
   PSAutoLock lock(gPSMutex);
 
-  const CorePS::ThreadVector& liveThreads = CorePS::LiveThreads(lock);
-  for (uint32_t i = 0; i < liveThreads.size(); i++) {
-    ThreadInfo* info = liveThreads.at(i);
+  const nsTArray<UniquePtr<RegisteredThread>>& registeredThreads =
+    CorePS::RegisteredThreads(lock);
+  for (auto& thread : registeredThreads) {
+    RefPtr<ThreadInfo> info = thread->Info();
+    RegisteredThread& registeredThread = *thread.get();
 
     if (info->ThreadId() == aThreadId) {
       if (info->IsMainThread()) {
         aCollector.SetIsMainThread();
       }
 
       // Allocate the space for the native stack
       NativeStack nativeStack;
 
       // Suspend, sample, and then resume the target thread.
       Sampler sampler(lock);
-      sampler.SuspendAndSampleAndResumeThread(lock, *info,
+      sampler.SuspendAndSampleAndResumeThread(lock, registeredThread,
                                               [&](const Registers& aRegs) {
         // The target thread is now suspended. Collect a native backtrace, and
         // call the callback.
         bool isSynchronous = false;
 #if defined(HAVE_FASTINIT_NATIVE_UNWIND)
         if (aSampleNative) {
           // We can only use FramePointerStackWalk or MozStackWalk from
           // suspend_and_sample_thread as other stackwalking methods may not be
           // initialized.
 # if defined(USE_FRAME_POINTER_STACK_WALK)
-          DoFramePointerBacktrace(lock, *info, aRegs, nativeStack);
+          DoFramePointerBacktrace(lock, registeredThread, aRegs, nativeStack);
 # elif defined(USE_MOZ_STACK_WALK)
-          DoMozStackWalkBacktrace(lock, *info, aRegs, nativeStack);
+          DoMozStackWalkBacktrace(lock, registeredThread, aRegs, nativeStack);
 # else
 #  error "Invalid configuration"
 # endif
 
-          MergeStacks(aFeatures, isSynchronous, *info, aRegs, nativeStack,
-                      aCollector);
+          MergeStacks(aFeatures, isSynchronous, registeredThread, aRegs,
+                      nativeStack, aCollector);
         } else
 #endif
         {
-          MergeStacks(aFeatures, isSynchronous, *info, aRegs, nativeStack,
-                      aCollector);
+          MergeStacks(aFeatures, isSynchronous, registeredThread, aRegs,
+                      nativeStack, aCollector);
 
           if (ProfilerFeature::HasLeaf(aFeatures)) {
             aCollector.CollectNativeLeafAddr((void*)aRegs.mPC);
           }
         }
       });
 
       // NOTE: Make sure to disable the sampler before it is destroyed, in case
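
profiler_suspend_and_sample_thread() keeps its overall shape: look the requested thread id up in the registered-thread list, then hand the sampler a lambda that it runs while the target is suspended, passing in the captured register state. Real suspension and stack walking are platform-specific; the sketch below only mimics that callback shape with a canned register snapshot, and every name in it is invented.

#include <cstdint>
#include <cstdio>
#include <functional>
#include <vector>

struct FakeRegisters { std::uintptr_t mPC = 0; std::uintptr_t mSP = 0; };

struct FakeRegisteredThread {
  int mThreadId;
  // In the real profiler the registers come from the suspended thread; here
  // we just store a canned snapshot so the example can run anywhere.
  FakeRegisters mSnapshot;
};

// Mimics Sampler::SuspendAndSampleAndResumeThread: the callback runs "while
// the target is suspended" (not really, in this sketch) with its registers.
void SuspendAndSampleAndResume(
  const FakeRegisteredThread& aThread,
  const std::function<void(const FakeRegisters&)>& aDoSample) {
  // ...suspend aThread here on a real platform...
  aDoSample(aThread.mSnapshot);
  // ...resume aThread here...
}

void SuspendAndSampleThreadById(
  const std::vector<FakeRegisteredThread>& aThreads, int aThreadId) {
  for (const FakeRegisteredThread& thread : aThreads) {
    if (thread.mThreadId != aThreadId) {
      continue;
    }
    SuspendAndSampleAndResume(thread, [&](const FakeRegisters& aRegs) {
      std::printf("sampled thread %d at pc=0x%zx\n", thread.mThreadId,
                  static_cast<size_t>(aRegs.mPC));
    });
    return;
  }
  std::printf("thread %d is not registered\n", aThreadId);
}

int main() {
  std::vector<FakeRegisteredThread> threads = {
    { 1001, { 0xdeadbeef, 0x7fff0000 } },
    { 1002, { 0xcafef00d, 0x7ffe0000 } },
  };
  SuspendAndSampleThreadById(threads, 1002);
  return 0;
}

This mirrors the constraint noted in the sampler loop earlier in the patch: anything that does I/O or could block has to stay out of the suspended-thread critical section, so the callback only walks the stack and copies data.
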
--- a/tools/profiler/moz.build
+++ b/tools/profiler/moz.build
@@ -16,20 +16,21 @@ if CONFIG['MOZ_GECKO_PROFILER']:
         'public/ProfilerMarkerPayload.h',
         'public/ProfilerParent.h',
         'public/shared-libraries.h',
     ]
     UNIFIED_SOURCES += [
         'core/platform.cpp',
         'core/ProfileBuffer.cpp',
         'core/ProfileBufferEntry.cpp',
+        'core/ProfiledThreadData.cpp',
         'core/ProfileJSONWriter.cpp',
         'core/ProfilerBacktrace.cpp',
         'core/ProfilerMarkerPayload.cpp',
-        'core/ThreadInfo.cpp',
+        'core/RegisteredThread.cpp',
         'gecko/ChildProfilerController.cpp',
         'gecko/nsProfilerFactory.cpp',
         'gecko/nsProfilerStartParams.cpp',
         'gecko/ProfilerChild.cpp',
         'gecko/ProfilerIOInterposeObserver.cpp',
         'gecko/ProfilerParent.cpp',
         'gecko/ThreadResponsiveness.cpp',
     ]
--- a/tools/profiler/tests/gtest/ThreadProfileTest.cpp
+++ b/tools/profiler/tests/gtest/ThreadProfileTest.cpp
@@ -4,63 +4,42 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "gtest/gtest.h"
 
 #include "ProfileBufferEntry.h"
 #include "ThreadInfo.h"
 
-// Make sure we can initialize our thread profile
-TEST(ThreadProfile, Initialization) {
-  int tid = 1000;
-  nsCOMPtr<nsIThread> mainThread;
-  NS_GetMainThread(getter_AddRefs(mainThread));
-  ThreadInfo info("testThread", tid, true, mainThread, nullptr);
-  info.StartProfiling();
-}
-
 // Make sure we can record one entry and read it
 TEST(ThreadProfile, InsertOneEntry) {
-  int tid = 1000;
-  nsCOMPtr<nsIThread> mainThread;
-  NS_GetMainThread(getter_AddRefs(mainThread));
-  ThreadInfo info("testThread", tid, true, mainThread, nullptr);
   auto pb = MakeUnique<ProfileBuffer>(10);
   pb->AddEntry(ProfileBufferEntry::Time(123.1));
   ASSERT_TRUE(pb->GetEntry(pb->mRangeStart).IsTime());
   ASSERT_TRUE(pb->GetEntry(pb->mRangeStart).u.mDouble == 123.1);
 }
 
 // See if we can insert some entries
 TEST(ThreadProfile, InsertEntriesNoWrap) {
-  int tid = 1000;
-  nsCOMPtr<nsIThread> mainThread;
-  NS_GetMainThread(getter_AddRefs(mainThread));
-  ThreadInfo info("testThread", tid, true, mainThread, nullptr);
   auto pb = MakeUnique<ProfileBuffer>(100);
   int test_size = 50;
   for (int i = 0; i < test_size; i++) {
     pb->AddEntry(ProfileBufferEntry::Time(i));
   }
   uint64_t readPos = pb->mRangeStart;
   while (readPos != pb->mRangeEnd) {
     ASSERT_TRUE(pb->GetEntry(readPos).IsTime());
     ASSERT_TRUE(pb->GetEntry(readPos).u.mDouble == readPos);
     readPos++;
   }
 }
 
 // See if evicting works as it should in the basic case
 TEST(ThreadProfile, InsertEntriesWrap) {
-  int tid = 1000;
   int entries = 32;
-  nsCOMPtr<nsIThread> mainThread;
-  NS_GetMainThread(getter_AddRefs(mainThread));
-  ThreadInfo info("testThread", tid, true, mainThread, nullptr);
   auto pb = MakeUnique<ProfileBuffer>(entries);
   ASSERT_TRUE(pb->mRangeStart == 0);
   ASSERT_TRUE(pb->mRangeEnd == 0);
   int test_size = 43;
   for (int i = 0; i < test_size; i++) {
     pb->AddEntry(ProfileBufferEntry::Time(i));
   }
   // We inserted 11 more entries than fit in the buffer, so the first 11 entries