Bug 1319987: P9. More coding style fixes. r?gerald
author Jean-Yves Avenard <jyavenard@mozilla.com>
Fri, 27 Jan 2017 13:20:37 +0100
changeset 479126 98787c7bedf0d0d1fa5e2a284d9b0e8ca7e9a526
parent 479125 32bf0abda6358317b332fe7c727adc17012c22e5
child 479127 02e415a1af342db2ef4c716f50851562163a1cbe
push id 44146
push user bmo:jyavenard@mozilla.com
push date Sun, 05 Feb 2017 21:13:07 +0000
reviewers gerald
bugs 1319987
milestone 54.0a1
Bug 1319987: P9. More coding style fixes. r?gerald MozReview-Commit-ID: DhFRqkWQZny
dom/media/AbstractMediaDecoder.h
dom/media/Benchmark.h
dom/media/DOMMediaStream.cpp
dom/media/GraphDriver.cpp
dom/media/MediaCache.cpp
dom/media/MediaData.h
dom/media/MediaDataDemuxer.h
dom/media/MediaDecoder.cpp
dom/media/MediaDecoder.h
dom/media/MediaDecoderReader.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
dom/media/MediaFormatReader.cpp
dom/media/MediaFormatReader.h
dom/media/MediaInfo.h
dom/media/MediaRecorder.cpp
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamListener.cpp
dom/media/flac/FlacDemuxer.cpp
dom/media/mediasink/DecodedAudioDataSink.cpp
dom/media/mediasource/ContainerParser.cpp
dom/media/mediasource/MediaSourceDecoder.cpp
dom/media/mediasource/MediaSourceDemuxer.cpp
dom/media/mediasource/TrackBuffersManager.cpp
dom/media/ogg/OggCodecState.cpp
dom/media/webaudio/AudioBuffer.cpp
--- a/dom/media/AbstractMediaDecoder.h
+++ b/dom/media/AbstractMediaDecoder.h
@@ -12,35 +12,35 @@
 
 #include "FrameStatistics.h"
 #include "MediaEventSource.h"
 #include "MediaInfo.h"
 #include "nsISupports.h"
 #include "nsDataHashtable.h"
 #include "nsThreadUtils.h"
 
-namespace mozilla
-{
+namespace mozilla {
 
-namespace layers
-{
-  class ImageContainer;
-  class KnowsCompositor;
+namespace layers {
+class ImageContainer;
+class KnowsCompositor;
 } // namespace layers
+
 class AbstractThread;
 class MediaResource;
 class ReentrantMonitor;
 class VideoFrameContainer;
 class MediaDecoderOwner;
 class CDMProxy;
 class GMPCrashHelper;
 
 typedef nsDataHashtable<nsCStringHashKey, nsCString> MetadataTags;
 
-static inline bool IsCurrentThread(nsIThread* aThread) {
+static inline bool IsCurrentThread(nsIThread* aThread)
+{
   return NS_GetCurrentThread() == aThread;
 }
 
 /**
  * The AbstractMediaDecoder class describes the public interface for a media decoder
  * and is used by the MediaReader classes.
  */
 class AbstractMediaDecoder : public nsIObserver
@@ -54,95 +54,105 @@ public:
   // by currentSrc. Returns what was passed to Load(), if Load() has been called.
   virtual MediaResource* GetResource() const = 0;
 
   // Increments the parsed, decoded and dropped frame counters by the passed in
   // counts.
   // Can be called on any thread.
   virtual void NotifyDecodedFrames(const FrameStatisticsData& aStats) = 0;
 
-  virtual AbstractCanonical<media::NullableTimeUnit>* CanonicalDurationOrNull() { return nullptr; };
+  virtual AbstractCanonical<media::NullableTimeUnit>* CanonicalDurationOrNull()
+  {
+    return nullptr;
+  };
 
   // Return an event that will be notified when data arrives in MediaResource.
   // MediaDecoderReader will register with this event to receive notifications
   // in order to update buffer ranges.
   // Return null if this decoder doesn't support the event.
   virtual MediaEventSource<void>* DataArrivedEvent()
   {
     return nullptr;
   }
 
   // Returns an event that will be notified when the owning document changes state
   // and we might have a new compositor. If this new compositor requires us to
   // recreate our decoders, then we expect the existing decoders to return an
   // error independently of this.
-  virtual MediaEventSource<RefPtr<layers::KnowsCompositor>>* CompositorUpdatedEvent()
+  virtual MediaEventSource<RefPtr<layers::KnowsCompositor>>*
+  CompositorUpdatedEvent()
   {
     return nullptr;
   }
 
   // Notify the media decoder that a decryption key is required before emitting
   // further output. This only needs to be overridden for decoders that expect
   // encryption, such as the MediaSource decoder.
-  virtual void NotifyWaitingForKey() {}
+  virtual void NotifyWaitingForKey() { }
 
   // Return an event that will be notified when a decoder is waiting for a
   // decryption key before it can return more output.
   virtual MediaEventSource<void>* WaitingForKeyEvent()
   {
     return nullptr;
   }
 
   // Return an abstract thread on which to run main thread runnables.
   virtual AbstractThread* AbstractMainThread() const = 0;
 
 protected:
-  virtual void UpdateEstimatedMediaDuration(int64_t aDuration) {};
+  virtual void UpdateEstimatedMediaDuration(int64_t aDuration) { };
 public:
   void DispatchUpdateEstimatedMediaDuration(int64_t aDuration)
   {
-    NS_DispatchToMainThread(NewRunnableMethod<int64_t>(this,
-                                                       &AbstractMediaDecoder::UpdateEstimatedMediaDuration,
-                                                       aDuration));
+    NS_DispatchToMainThread(NewRunnableMethod<int64_t>(
+      this, &AbstractMediaDecoder::UpdateEstimatedMediaDuration, aDuration));
   }
 
   virtual VideoFrameContainer* GetVideoFrameContainer() = 0;
   virtual mozilla::layers::ImageContainer* GetImageContainer() = 0;
 
   // Returns the owner of this decoder or null when the decoder is shutting
   // down. The owner should only be used on the main thread.
   virtual MediaDecoderOwner* GetOwner() const = 0;
 
   // Set by Reader if the current audio track can be offloaded
-  virtual void SetPlatformCanOffloadAudio(bool aCanOffloadAudio) {}
+  virtual void SetPlatformCanOffloadAudio(bool aCanOffloadAudio) { }
 
   virtual already_AddRefed<GMPCrashHelper> GetCrashHelper() { return nullptr; }
 
   // Stack-based class to assist in notifying the frame statistics of
   // parsed and decoded frames. Use inside video demux & decode functions
   // to ensure all parsed and decoded frames are reported on all return paths.
-  class AutoNotifyDecoded {
+  class AutoNotifyDecoded
+  {
   public:
     explicit AutoNotifyDecoded(AbstractMediaDecoder* aDecoder)
       : mDecoder(aDecoder)
-    {}
-    ~AutoNotifyDecoded() {
+    {
+    }
+    ~AutoNotifyDecoded()
+    {
       if (mDecoder) {
         mDecoder->NotifyDecodedFrames(mStats);
       }
     }
 
     FrameStatisticsData mStats;
 
   private:
     AbstractMediaDecoder* mDecoder;
   };
 
   // Classes directly inheriting from AbstractMediaDecoder do not support
   // Observe and it should never be called directly.
-  NS_IMETHOD Observe(nsISupports *aSubject, const char * aTopic, const char16_t * aData) override
-  { MOZ_CRASH("Forbidden method"); return NS_OK; }
+  NS_IMETHOD Observe(nsISupports* aSubject, const char* aTopic,
+                     const char16_t* aData) override
+  {
+    MOZ_CRASH("Forbidden method");
+    return NS_OK;
+  }
 };
 
 } // namespace mozilla
 
 #endif
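
A note on the AutoNotifyDecoded helper reformatted above: it is an RAII guard, so
frame counts accumulated in mStats are flushed to NotifyDecodedFrames() on every
return path. A minimal sketch of the intended usage, assuming a hypothetical
decode helper and the mParsed/mDecoded counters from FrameStatistics.h:

    #include "AbstractMediaDecoder.h"

    bool DecodeOneFrame(); // hypothetical helper, not part of this patch

    void DecodeVideoFrame(mozilla::AbstractMediaDecoder* aDecoder)
    {
      mozilla::AbstractMediaDecoder::AutoNotifyDecoded a(aDecoder);
      a.mStats.mParsed++;   // a frame was parsed out of the container
      if (!DecodeOneFrame()) {
        return;             // stats are still reported by the destructor
      }
      a.mStats.mDecoded++;  // the frame decoded successfully
    }                       // ~AutoNotifyDecoded -> NotifyDecodedFrames(mStats)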
 
--- a/dom/media/Benchmark.h
+++ b/dom/media/Benchmark.h
@@ -19,17 +19,17 @@
 namespace mozilla {
 
 class TaskQueue;
 class Benchmark;
 
 class BenchmarkPlayback : public QueueObject
 {
   friend class Benchmark;
-  explicit BenchmarkPlayback(Benchmark* aMainThreadState, MediaDataDemuxer* aDemuxer);
+  BenchmarkPlayback(Benchmark* aMainThreadState, MediaDataDemuxer* aDemuxer);
   void DemuxSamples();
   void DemuxNextSample();
   void MainThreadShutdown();
   void InitDecoder(TrackInfo&& aInfo);
 
   void Output(const MediaDataDecoder::DecodedData& aResults);
   void InputExhausted();
 
@@ -56,36 +56,41 @@ class Benchmark : public QueueObject
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Benchmark)
 
   struct Parameters
   {
     Parameters()
       : mFramesToMeasure(-1)
       , mStartupFrame(1)
-      , mTimeout(TimeDuration::Forever()) {}
+      , mTimeout(TimeDuration::Forever())
+    {
+    }
 
     Parameters(int32_t aFramesToMeasure,
                uint32_t aStartupFrame,
                int32_t aStopAtFrame,
                const TimeDuration& aTimeout)
       : mFramesToMeasure(aFramesToMeasure)
       , mStartupFrame(aStartupFrame)
       , mStopAtFrame(Some(aStopAtFrame))
-      , mTimeout(aTimeout) {}
+      , mTimeout(aTimeout)
+    {
+    }
 
     const int32_t mFramesToMeasure;
     const uint32_t mStartupFrame;
     const Maybe<int32_t> mStopAtFrame;
     const TimeDuration mTimeout;
   };
 
   typedef MozPromise<uint32_t, bool, /* IsExclusive = */ true> BenchmarkPromise;
 
-  explicit Benchmark(MediaDataDemuxer* aDemuxer, const Parameters& aParameters = Parameters());
+  explicit Benchmark(MediaDataDemuxer* aDemuxer,
+                     const Parameters& aParameters = Parameters());
   RefPtr<BenchmarkPromise> Run();
 
   static void Init();
 
 private:
   friend class BenchmarkPlayback;
   virtual ~Benchmark();
   void ReturnResult(uint32_t aDecodeFps);
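
Usage note for the reflowed Benchmark constructor: Run() resolves a
BenchmarkPromise with the measured decode rate in frames per second. A hedged
sketch, assuming an already-initialized 'demuxer' and the usual
MozPromise::Then(thread, callsite, resolve, reject) form:

    RefPtr<Benchmark> estimate = new Benchmark(
      demuxer,
      Benchmark::Parameters(300 /* frames to measure */,
                            1 /* startup frame */,
                            600 /* stop at frame */,
                            TimeDuration::FromSeconds(10)));
    estimate->Run()->Then(
      AbstractThread::MainThread(), __func__,
      [](uint32_t aDecodeFps) { /* record the measured fps */ },
      [](bool) { /* the benchmark failed or timed out */ });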
--- a/dom/media/DOMMediaStream.cpp
+++ b/dom/media/DOMMediaStream.cpp
@@ -719,17 +719,17 @@ already_AddRefed<DOMMediaStream>
 DOMMediaStream::CloneInternal(TrackForwardingOption aForwarding)
 {
   RefPtr<DOMMediaStream> newStream =
     new DOMMediaStream(GetParentObject(), new ClonedStreamSourceGetter(this));
 
   LOG(LogLevel::Info, ("DOMMediaStream %p created clone %p, forwarding %s tracks",
                        this, newStream.get(),
                        aForwarding == TrackForwardingOption::ALL
-                         ? "all" : "current"));
+                       ? "all" : "current"));
 
   MOZ_RELEASE_ASSERT(mPlaybackStream);
   MOZ_RELEASE_ASSERT(mPlaybackStream->Graph());
   MediaStreamGraph* graph = mPlaybackStream->Graph();
 
   // We initiate the owned and playback streams first, since we need to create
   // all existing DOM tracks before we add the generic input port from
   // mInputStream to mOwnedStream (see AllocateInputPort wrt. destination
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -62,18 +62,18 @@ void GraphDriver::SetGraphTime(GraphDriv
   mIterationEnd = aLastSwitchNextIterationEnd;
 
   MOZ_ASSERT(!PreviousDriver());
   MOZ_ASSERT(aPreviousDriver);
 
   STREAM_LOG(LogLevel::Debug, ("Setting previous driver: %p (%s)",
                                aPreviousDriver,
                                aPreviousDriver->AsAudioCallbackDriver()
-                                 ? "AudioCallbackDriver"
-                                 : "SystemClockDriver"));
+                               ? "AudioCallbackDriver"
+                               : "SystemClockDriver"));
   SetPreviousDriver(aPreviousDriver);
 }
 
 void GraphDriver::SwitchAtNextIteration(GraphDriver* aNextDriver)
 {
   GraphImpl()->GetMonitor().AssertCurrentThreadOwns();
   LIFECYCLE_LOG("Switching to new driver: %p (%s)",
       aNextDriver, aNextDriver->AsAudioCallbackDriver() ?
--- a/dom/media/MediaCache.cpp
+++ b/dom/media/MediaCache.cpp
@@ -1521,18 +1521,18 @@ MediaCache::AllocateAndWriteBlock(MediaC
       if (!bo)
         return;
 
       bo->mStream = stream;
       bo->mStreamBlock = streamBlockIndex;
       bo->mLastUseTime = now;
       stream->mBlocks[streamBlockIndex] = blockIndex;
       if (streamBlockIndex*BLOCK_SIZE < stream->mStreamOffset) {
-        bo->mClass = aMode == MediaCacheStream::MODE_PLAYBACK
-          ? PLAYED_BLOCK : METADATA_BLOCK;
+        bo->mClass = aMode == MediaCacheStream::MODE_PLAYBACK ? PLAYED_BLOCK
+                                                              : METADATA_BLOCK;
         // This must be the most-recently-used block, since we
         // marked it as used now (which may be slightly bogus, but we'll
         // treat it as used for simplicity).
         GetListForBlock(bo)->AddFirstBlock(blockIndex);
         Verify();
       } else {
         // This may not be the latest readahead block, although it usually
         // will be. We may have to scan for the right place to insert
@@ -1643,17 +1643,18 @@ MediaCache::NoteBlockUsage(MediaCacheStr
   // The following check has to be <= because the stream offset has
   // not yet been updated for the data read from this block
   NS_ASSERTION(bo->mStreamBlock*BLOCK_SIZE <= aStreamOffset,
                "Using a block that's behind the read position?");
 
   GetListForBlock(bo)->RemoveBlock(aBlockIndex);
   bo->mClass =
     (aMode == MediaCacheStream::MODE_METADATA || bo->mClass == METADATA_BLOCK)
-    ? METADATA_BLOCK : PLAYED_BLOCK;
+    ? METADATA_BLOCK
+    : PLAYED_BLOCK;
   // Since this is just being used now, it can definitely be at the front
   // of mMetadataBlocks or mPlayedBlocks
   GetListForBlock(bo)->AddFirstBlock(aBlockIndex);
   bo->mLastUseTime = aNow;
   Verify();
 }
 
 void
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -265,22 +265,24 @@ private:
 };
 
 typedef AlignedBuffer<uint8_t> AlignedByteBuffer;
 typedef AlignedBuffer<float> AlignedFloatBuffer;
 typedef AlignedBuffer<int16_t> AlignedShortBuffer;
 typedef AlignedBuffer<AudioDataValue> AlignedAudioBuffer;
 
 // Container that holds media samples.
-class MediaData {
+class MediaData
+{
 public:
 
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaData)
 
-  enum Type {
+  enum Type
+  {
     AUDIO_DATA = 0,
     VIDEO_DATA,
     RAW_DATA,
     NULL_DATA
   };
 
   MediaData(Type aType,
             int64_t aOffset,
@@ -289,17 +291,18 @@ public:
             uint32_t aFrames)
     : mType(aType)
     , mOffset(aOffset)
     , mTime(aTimestamp)
     , mTimecode(aTimestamp)
     , mDuration(aDuration)
     , mFrames(aFrames)
     , mKeyframe(false)
-  {}
+  {
+  }
 
   // Type of contained data.
   const Type mType;
 
   // Approximate byte offset where this data was demuxed from its media.
   int64_t mOffset;
 
   // Start time of sample, in microseconds.
@@ -343,35 +346,38 @@ protected:
   MediaData(Type aType, uint32_t aFrames)
     : mType(aType)
     , mOffset(0)
     , mTime(0)
     , mTimecode(0)
     , mDuration(0)
     , mFrames(aFrames)
     , mKeyframe(false)
-  {}
+  {
+  }
 
   virtual ~MediaData() {}
 
 };
 
 // NullData is for a decoder generating a sample which doesn't need to be
 // rendered.
-class NullData : public MediaData {
+class NullData : public MediaData
+{
 public:
   NullData(int64_t aOffset, int64_t aTime, int64_t aDuration)
     : MediaData(NULL_DATA, aOffset, aTime, aDuration, 0)
   {}
 
   static const Type sType = NULL_DATA;
 };
 
 // Holds a chunk of decoded audio frames.
-class AudioData : public MediaData {
+class AudioData : public MediaData
+{
 public:
 
   AudioData(int64_t aOffset,
             int64_t aTime,
             int64_t aDuration,
             uint32_t aFrames,
             AlignedAudioBuffer&& aData,
             uint32_t aChannels,
@@ -406,57 +412,61 @@ public:
   const uint32_t mRate;
   // At least one of mAudioBuffer/mAudioData must be non-null.
   // mChannels channels, each with mFrames frames
   RefPtr<SharedBuffer> mAudioBuffer;
   // mFrames frames, each with mChannels values
   AlignedAudioBuffer mAudioData;
 
 protected:
-  ~AudioData() {}
+  ~AudioData() { }
 };
 
 namespace layers {
 class TextureClient;
 class PlanarYCbCrImage;
 } // namespace layers
 
 class VideoInfo;
 
 // Holds a decoded video frame, in YCbCr format. These are queued in the reader.
-class VideoData : public MediaData {
+class VideoData : public MediaData
+{
 public:
   typedef gfx::IntRect IntRect;
   typedef gfx::IntSize IntSize;
   typedef layers::ImageContainer ImageContainer;
   typedef layers::Image Image;
   typedef layers::PlanarYCbCrImage PlanarYCbCrImage;
 
   static const Type sType = VIDEO_DATA;
   static const char* sTypeName;
 
   // YCbCr data obtained from decoding the video. The indices are:
   //   0 = Y
   //   1 = Cb
   //   2 = Cr
-  struct YCbCrBuffer {
-    struct Plane {
+  struct YCbCrBuffer
+  {
+    struct Plane
+    {
       uint8_t* mData;
       uint32_t mWidth;
       uint32_t mHeight;
       uint32_t mStride;
       uint32_t mOffset;
       uint32_t mSkip;
     };
 
     Plane mPlanes[3];
     YUVColorSpace mYUVColorSpace = YUVColorSpace::BT601;
   };
 
-  class Listener {
+  class Listener
+  {
   public:
     virtual void OnSentToCompositor() = 0;
     virtual ~Listener() {}
   };
 
   // Constructs a VideoData object. If aImage is nullptr, creates a new Image
   // holding a copy of the YCbCr data passed in aBuffer. If aImage is not
   // nullptr, it's stored as the underlying video image and aBuffer is assumed
@@ -464,54 +474,58 @@ public:
   // specific number representing the timestamp of the frame of video data.
   // Returns nullptr if an error occurs. This may indicate that memory couldn't
   // be allocated to create the VideoData object, or it may indicate some
   // problem with the input data (e.g. negative stride).
 
 
   // Creates a new VideoData containing a deep copy of aBuffer. May use aContainer
   // to allocate an Image to hold the copied data.
-  static already_AddRefed<VideoData> CreateAndCopyData(const VideoInfo& aInfo,
-                                                       ImageContainer* aContainer,
-                                                       int64_t aOffset,
-                                                       int64_t aTime,
-                                                       int64_t aDuration,
-                                                       const YCbCrBuffer &aBuffer,
-                                                       bool aKeyframe,
-                                                       int64_t aTimecode,
-                                                       const IntRect& aPicture);
+  static already_AddRefed<VideoData> CreateAndCopyData(
+    const VideoInfo& aInfo,
+    ImageContainer* aContainer,
+    int64_t aOffset,
+    int64_t aTime,
+    int64_t aDuration,
+    const YCbCrBuffer &aBuffer,
+    bool aKeyframe,
+    int64_t aTimecode,
+    const IntRect& aPicture);
 
-  static already_AddRefed<VideoData> CreateAndCopyData(const VideoInfo& aInfo,
-                                                       ImageContainer* aContainer,
-                                                       int64_t aOffset,
-                                                       int64_t aTime,
-                                                       int64_t aDuration,
-                                                       const YCbCrBuffer &aBuffer,
-                                                       const YCbCrBuffer::Plane &aAlphaPlane,
-                                                       bool aKeyframe,
-                                                       int64_t aTimecode,
-                                                       const IntRect& aPicture);
+  static already_AddRefed<VideoData> CreateAndCopyData(
+    const VideoInfo& aInfo,
+    ImageContainer* aContainer,
+    int64_t aOffset,
+    int64_t aTime,
+    int64_t aDuration,
+    const YCbCrBuffer &aBuffer,
+    const YCbCrBuffer::Plane &aAlphaPlane,
+    bool aKeyframe,
+    int64_t aTimecode,
+    const IntRect& aPicture);
 
-  static already_AddRefed<VideoData> CreateAndCopyIntoTextureClient(const VideoInfo& aInfo,
-                                                                    int64_t aOffset,
-                                                                    int64_t aTime,
-                                                                    int64_t aDuration,
-                                                                    layers::TextureClient* aBuffer,
-                                                                    bool aKeyframe,
-                                                                    int64_t aTimecode,
-                                                                    const IntRect& aPicture);
+  static already_AddRefed<VideoData> CreateAndCopyIntoTextureClient(
+    const VideoInfo& aInfo,
+    int64_t aOffset,
+    int64_t aTime,
+    int64_t aDuration,
+    layers::TextureClient* aBuffer,
+    bool aKeyframe,
+    int64_t aTimecode,
+    const IntRect& aPicture);
 
-  static already_AddRefed<VideoData> CreateFromImage(const VideoInfo& aInfo,
-                                                     int64_t aOffset,
-                                                     int64_t aTime,
-                                                     int64_t aDuration,
-                                                     const RefPtr<Image>& aImage,
-                                                     bool aKeyframe,
-                                                     int64_t aTimecode,
-                                                     const IntRect& aPicture);
+  static already_AddRefed<VideoData> CreateFromImage(
+    const VideoInfo& aInfo,
+    int64_t aOffset,
+    int64_t aTime,
+    int64_t aDuration,
+    const RefPtr<Image>& aImage,
+    bool aKeyframe,
+    int64_t aTimecode,
+    const IntRect& aPicture);
 
   // Initialize PlanarYCbCrImage. Only when aCopyData is true,
   // video data is copied to PlanarYCbCrImage.
   static bool SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
                                   const VideoInfo& aInfo,
                                   const YCbCrBuffer &aBuffer,
                                   const IntRect& aPicture,
                                   bool aCopyData);
@@ -548,17 +562,17 @@ protected:
 
   bool mSentToCompositor;
   UniquePtr<Listener> mListener;
 };
 
 class CryptoTrack
 {
 public:
-  CryptoTrack() : mValid(false), mMode(0), mIVSize(0) {}
+  CryptoTrack() : mValid(false), mMode(0), mIVSize(0) { }
   bool mValid;
   int32_t mMode;
   int32_t mIVSize;
   nsTArray<uint8_t> mKeyId;
 };
 
 class CryptoSample : public CryptoTrack
 {
@@ -615,17 +629,18 @@ public:
 
 private:
   friend class MediaRawData;
   explicit MediaRawDataWriter(MediaRawData* aMediaRawData);
   bool EnsureSize(size_t aSize);
   MediaRawData* mTarget;
 };
 
-class MediaRawData : public MediaData {
+class MediaRawData : public MediaData
+{
 public:
   MediaRawData();
   MediaRawData(const uint8_t* aData, size_t aSize);
   MediaRawData(const uint8_t* aData, size_t aSize,
                const uint8_t* aAlphaData, size_t aAlphaSize);
 
   // Pointer to data or null if not-yet allocated
   const uint8_t* Data() const { return mBuffer.Data(); }
@@ -671,17 +686,17 @@ private:
   CryptoSample mCryptoInternal;
   MediaRawData(const MediaRawData&); // Not implemented
 };
 
   // MediaByteBuffer is a ref counted infallible TArray.
 class MediaByteBuffer : public nsTArray<uint8_t> {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaByteBuffer);
   MediaByteBuffer() = default;
-  explicit MediaByteBuffer(size_t aCapacity) : nsTArray<uint8_t>(aCapacity) {}
+  explicit MediaByteBuffer(size_t aCapacity) : nsTArray<uint8_t>(aCapacity) { }
 
 private:
-  ~MediaByteBuffer() {}
+  ~MediaByteBuffer() { }
 };
 
 } // namespace mozilla
 
 #endif // MediaData_h
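
The CreateAndCopyData() overloads rewrapped above all follow the construction
contract in the comment block: deep-copy aBuffer into a new Image and return
null on failure. A hedged sketch of the first overload, where 'info',
'container', 'buffer' and the timing values are assumptions standing in for
real demuxed state:

    RefPtr<VideoData> frame = VideoData::CreateAndCopyData(
      info,       // VideoInfo describing the stream
      container,  // ImageContainer that may allocate the backing Image
      offset,     // byte offset the frame was demuxed from
      time,       // presentation time in microseconds
      duration,   // frame duration in microseconds
      buffer,     // YCbCrBuffer with mPlanes[0..2] filled in
      true,       // keyframe
      timecode,   // decode timestamp
      gfx::IntRect(0, 0, info.mDisplay.width, info.mDisplay.height));
    if (!frame) {
      // Allocation failed, or the input was invalid (e.g. negative stride).
    }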
--- a/dom/media/MediaDataDemuxer.h
+++ b/dom/media/MediaDataDemuxer.h
@@ -50,18 +50,18 @@ public:
   // 0 indicates that no such type is available.
   virtual uint32_t GetNumberTracks(TrackInfo::TrackType aType) const = 0;
 
   // Returns the MediaTrackDemuxer associated with aTrackNumber aType track.
   // aTrackNumber is not to be confused with the Track ID.
   // aTrackNumber must be constrained between 0 and GetNumberTracks(aType) - 1.
   // The actual Track ID is to be retrieved by calling
   // MediaTrackDemuxer::TrackInfo.
-  virtual already_AddRefed<MediaTrackDemuxer> GetTrackDemuxer(TrackInfo::TrackType aType,
-                                                              uint32_t aTrackNumber) = 0;
+  virtual already_AddRefed<MediaTrackDemuxer> GetTrackDemuxer(
+    TrackInfo::TrackType aType, uint32_t aTrackNumber) = 0;
 
   // Returns true if the underlying resource allows seeking.
   virtual bool IsSeekable() const = 0;
 
   // Returns true if the underlying resource can only seek within buffered
   // ranges.
   virtual bool IsSeekableOnlyInBufferedRanges() const { return false; }
 
@@ -96,37 +96,43 @@ protected:
   }
 };
 
 class MediaTrackDemuxer
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaTrackDemuxer)
 
-  class SamplesHolder {
+  class SamplesHolder
+  {
   public:
     NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SamplesHolder)
     nsTArray<RefPtr<MediaRawData>> mSamples;
   private:
-    ~SamplesHolder() {}
+    ~SamplesHolder() { }
   };
 
-  class SkipFailureHolder {
+  class SkipFailureHolder
+  {
   public:
     SkipFailureHolder(const MediaResult& aFailure, uint32_t aSkipped)
       : mFailure(aFailure)
       , mSkipped(aSkipped)
     {}
     MediaResult mFailure;
     uint32_t mSkipped;
   };
 
-  typedef MozPromise<media::TimeUnit, MediaResult, /* IsExclusive = */ true> SeekPromise;
-  typedef MozPromise<RefPtr<SamplesHolder>, MediaResult, /* IsExclusive = */ true> SamplesPromise;
-  typedef MozPromise<uint32_t, SkipFailureHolder, /* IsExclusive = */ true> SkipAccessPointPromise;
+  typedef MozPromise<media::TimeUnit, MediaResult, /* IsExclusive = */ true>
+    SeekPromise;
+  typedef MozPromise<RefPtr<SamplesHolder>, MediaResult,
+                     /* IsExclusive = */ true>
+    SamplesPromise;
+  typedef MozPromise<uint32_t, SkipFailureHolder, /* IsExclusive = */ true>
+    SkipAccessPointPromise;
 
   // Returns the TrackInfo (a.k.a. Track Description) for this track.
   // The TrackInfo returned will be:
   // TrackInfo::kVideoTrack -> VideoInfo.
   // TrackInfo::kAudioTrack -> AudioInfo.
   // respectively.
   virtual UniquePtr<TrackInfo> GetInfo() const = 0;
 
@@ -202,14 +208,14 @@ public:
 
   // If the MediaTrackDemuxer and MediaDataDemuxer hold cross references.
   // BreakCycles must be overridden.
   virtual void BreakCycles()
   {
   }
 
 protected:
-  virtual ~MediaTrackDemuxer() {}
+  virtual ~MediaTrackDemuxer() { }
 };
 
 } // namespace mozilla
 
 #endif // MediaDataDemuxer_h
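
The three promise typedefs reflowed above define the demuxer's asynchronous
contract. A hedged sketch of driving it, assuming a GetSamples() entry point
and a 'taskQueue' that are not shown in this hunk:

    RefPtr<MediaTrackDemuxer> track =
      demuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0);
    track->GetSamples(1)->Then(
      taskQueue, __func__,
      [](RefPtr<MediaTrackDemuxer::SamplesHolder> aHolder) {
        for (const RefPtr<MediaRawData>& sample : aHolder->mSamples) {
          // Feed each demuxed sample to the decoder.
        }
      },
      [](const MediaResult& aError) { /* demuxing failed or hit EOS */ });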
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -87,26 +87,28 @@ class MediaMemoryTracker : public nsIMem
 
   MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf);
 
   MediaMemoryTracker();
   void InitMemoryReporter();
 
   static StaticRefPtr<MediaMemoryTracker> sUniqueInstance;
 
-  static MediaMemoryTracker* UniqueInstance() {
+  static MediaMemoryTracker* UniqueInstance()
+  {
     if (!sUniqueInstance) {
       sUniqueInstance = new MediaMemoryTracker();
       sUniqueInstance->InitMemoryReporter();
     }
     return sUniqueInstance;
   }
 
   typedef nsTArray<MediaDecoder*> DecodersArray;
-  static DecodersArray& Decoders() {
+  static DecodersArray& Decoders()
+  {
     return UniqueInstance()->mDecoders;
   }
 
   DecodersArray mDecoders;
 
 public:
   static void AddMediaDecoder(MediaDecoder* aDecoder)
   {
@@ -436,34 +438,36 @@ MediaDecoder::MediaDecoder(MediaDecoderO
   //
   // Initialize watchers.
   //
 
   // mDuration
   mWatchManager.Watch(mStateMachineDuration, &MediaDecoder::DurationChanged);
 
   // mStateMachineIsShutdown
-  mWatchManager.Watch(mStateMachineIsShutdown, &MediaDecoder::ShutdownBitChanged);
+  mWatchManager.Watch(mStateMachineIsShutdown,
+                      &MediaDecoder::ShutdownBitChanged);
 
   // readyState
   mWatchManager.Watch(mPlayState, &MediaDecoder::UpdateReadyState);
   mWatchManager.Watch(mNextFrameStatus, &MediaDecoder::UpdateReadyState);
   // ReadyState computation depends on MediaDecoder::CanPlayThrough, which
   // depends on the download rate.
   mWatchManager.Watch(mBuffered, &MediaDecoder::UpdateReadyState);
 
   // mLogicalPosition
   mWatchManager.Watch(mCurrentPosition, &MediaDecoder::UpdateLogicalPosition);
   mWatchManager.Watch(mPlayState, &MediaDecoder::UpdateLogicalPosition);
   mWatchManager.Watch(mLogicallySeeking, &MediaDecoder::UpdateLogicalPosition);
 
   // mIgnoreProgressData
   mWatchManager.Watch(mLogicallySeeking, &MediaDecoder::SeekingChanged);
 
-  mWatchManager.Watch(mIsAudioDataAudible, &MediaDecoder::NotifyAudibleStateChanged);
+  mWatchManager.Watch(mIsAudioDataAudible,
+                      &MediaDecoder::NotifyAudibleStateChanged);
 
   MediaShutdownManager::Instance().Register(this);
 }
 
 #undef INIT_MIRROR
 #undef INIT_CANONICAL
 
 void
@@ -643,18 +647,19 @@ MediaDecoder::SetStateMachineParameters(
   MOZ_ASSERT(NS_IsMainThread());
   if (mPlaybackRate != 1 && mPlaybackRate != 0) {
     mDecoderStateMachine->DispatchSetPlaybackRate(mPlaybackRate);
   }
   mTimedMetadataListener = mDecoderStateMachine->TimedMetadataEvent().Connect(
     mAbstractMainThread, this, &MediaDecoder::OnMetadataUpdate);
   mMetadataLoadedListener = mDecoderStateMachine->MetadataLoadedEvent().Connect(
     mAbstractMainThread, this, &MediaDecoder::MetadataLoaded);
-  mFirstFrameLoadedListener = mDecoderStateMachine->FirstFrameLoadedEvent().Connect(
-    mAbstractMainThread, this, &MediaDecoder::FirstFrameLoaded);
+  mFirstFrameLoadedListener =
+    mDecoderStateMachine->FirstFrameLoadedEvent().Connect(
+      mAbstractMainThread, this, &MediaDecoder::FirstFrameLoaded);
 
   mOnPlaybackEvent = mDecoderStateMachine->OnPlaybackEvent().Connect(
     mAbstractMainThread, this, &MediaDecoder::OnPlaybackEvent);
   mOnPlaybackErrorEvent = mDecoderStateMachine->OnPlaybackErrorEvent().Connect(
     mAbstractMainThread, this, &MediaDecoder::OnPlaybackErrorEvent);
   mOnDecoderDoctorEvent = mDecoderStateMachine->OnDecoderDoctorEvent().Connect(
     mAbstractMainThread, this, &MediaDecoder::OnDecoderDoctorEvent);
   mOnMediaNotSeekable = mDecoderStateMachine->OnMediaNotSeekable().Connect(
@@ -690,17 +695,18 @@ MediaDecoder::Play()
     return NS_OK;
   }
 
   ChangeState(PLAY_STATE_PLAYING);
   return NS_OK;
 }
 
 nsresult
-MediaDecoder::Seek(double aTime, SeekTarget::Type aSeekType, dom::Promise* aPromise /*=nullptr*/)
+MediaDecoder::Seek(double aTime, SeekTarget::Type aSeekType,
+                   dom::Promise* aPromise /*=nullptr*/)
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
 
   MOZ_ASSERT(aTime >= 0.0, "Cannot seek to a negative value.");
 
   int64_t timeUsecs = TimeUnit::FromSeconds(aTime).ToMicroseconds();
 
@@ -808,17 +814,18 @@ MediaDecoder::MetadataLoaded(nsAutoPtr<M
   mMediaSeekableOnlyInBufferedRanges = aInfo->mMediaSeekableOnlyInBufferedRanges;
   mInfo = aInfo.forget();
   ConstructMediaTracks();
 
   // Make sure the element and the frame (if any) are told about
   // our new size.
   if (aEventVisibility != MediaDecoderEventVisibility::Suppressed) {
     mFiredMetadataLoaded = true;
-    GetOwner()->MetadataLoaded(mInfo, nsAutoPtr<const MetadataTags>(aTags.forget()));
+    GetOwner()->MetadataLoaded(mInfo,
+                               nsAutoPtr<const MetadataTags>(aTags.forget()));
   }
   // Invalidate() will end up calling GetOwner()->UpdateMediaSize with the last
   // dimensions retrieved from the video frame container. The video frame
   // container contains more up-to-date dimensions than aInfo.
   // So we call Invalidate() after calling GetOwner()->MetadataLoaded to ensure
   // the media element has the latest dimensions.
   Invalidate();
 
@@ -833,25 +840,27 @@ MediaDecoder::EnsureTelemetryReported()
   if (mTelemetryReported || !mInfo) {
     // Note: sometimes we get multiple MetadataLoaded calls (for example
     // for chained ogg). So we ensure we don't report duplicate results for
     // these resources.
     return;
   }
 
   nsTArray<nsCString> codecs;
-  if (mInfo->HasAudio() && !mInfo->mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
+  if (mInfo->HasAudio()
+      && !mInfo->mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
     codecs.AppendElement(mInfo->mAudio.GetAsAudioInfo()->mMimeType);
   }
-  if (mInfo->HasVideo() && !mInfo->mVideo.GetAsVideoInfo()->mMimeType.IsEmpty()) {
+  if (mInfo->HasVideo()
+      && !mInfo->mVideo.GetAsVideoInfo()->mMimeType.IsEmpty()) {
     codecs.AppendElement(mInfo->mVideo.GetAsVideoInfo()->mMimeType);
   }
   if (codecs.IsEmpty()) {
-    codecs.AppendElement(nsPrintfCString("resource; %s",
-                                         mResource->GetContentType().OriginalString().Data()));
+    codecs.AppendElement(nsPrintfCString(
+      "resource; %s", mResource->GetContentType().OriginalString().Data()));
   }
   for (const nsCString& codec : codecs) {
     DECODER_LOG("Telemetry MEDIA_CODEC_USED= '%s'", codec.get());
     Telemetry::Accumulate(Telemetry::ID::MEDIA_CODEC_USED, codec);
   }
 
   mTelemetryReported = true;
 }
@@ -865,19 +874,20 @@ MediaDecoder::PlayStateStr()
 
 void
 MediaDecoder::FirstFrameLoaded(nsAutoPtr<MediaInfo> aInfo,
                                MediaDecoderEventVisibility aEventVisibility)
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
 
-  DECODER_LOG("FirstFrameLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d mPlayState=%s",
-              aInfo->mAudio.mChannels, aInfo->mAudio.mRate,
-              aInfo->HasAudio(), aInfo->HasVideo(), PlayStateStr());
+  DECODER_LOG("FirstFrameLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d "
+              "mPlayState=%s",
+              aInfo->mAudio.mChannels, aInfo->mAudio.mRate, aInfo->HasAudio(),
+              aInfo->HasVideo(), PlayStateStr());
 
   mInfo = aInfo.forget();
 
   Invalidate();
 
   // This can run cache callbacks.
   mResource->EnsureCacheUpToDate();
 
@@ -957,18 +967,20 @@ public:
 private:
   WeakPtr<HTMLMediaElement> mElement;
 };
 
 already_AddRefed<GMPCrashHelper>
 MediaDecoder::GetCrashHelper()
 {
   MOZ_ASSERT(NS_IsMainThread());
-  return GetOwner()->GetMediaElement() ?
-    MakeAndAddRef<MediaElementGMPCrashHelper>(GetOwner()->GetMediaElement()) : nullptr;
+  return GetOwner()->GetMediaElement()
+         ? MakeAndAddRef<MediaElementGMPCrashHelper>(
+             GetOwner()->GetMediaElement())
+         : nullptr;
 }
 
 bool
 MediaDecoder::IsEnded() const
 {
   MOZ_ASSERT(NS_IsMainThread());
   return mPlayState == PLAY_STATE_ENDED;
 }
@@ -994,48 +1006,50 @@ MediaDecoder::PlaybackEnded()
   }
 
   DECODER_LOG("MediaDecoder::PlaybackEnded");
 
   ChangeState(PLAY_STATE_ENDED);
   InvalidateWithFlags(VideoFrameContainer::INVALIDATE_FORCE);
   GetOwner()->PlaybackEnded();
 
-  // This must be called after |GetOwner()->PlaybackEnded()| call above, in order
-  // to fire the required durationchange.
+  // This must be called after |GetOwner()->PlaybackEnded()| call above, in
+  // order to fire the required durationchange.
   if (IsInfinite()) {
     SetInfinite(false);
   }
 }
 
 MediaStatistics
 MediaDecoder::GetStatistics()
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mResource);
 
   MediaStatistics result;
-  result.mDownloadRate = mResource->GetDownloadRate(&result.mDownloadRateReliable);
+  result.mDownloadRate =
+    mResource->GetDownloadRate(&result.mDownloadRateReliable);
   result.mDownloadPosition = mResource->GetCachedDataEnd(mDecoderPosition);
   result.mTotalBytes = mResource->GetLength();
   result.mPlaybackRate = mPlaybackBytesPerSecond;
   result.mPlaybackRateReliable = mPlaybackRateReliable;
   result.mDecoderPosition = mDecoderPosition;
   result.mPlaybackPosition = mPlaybackPosition;
   return result;
 }
 
 void
 MediaDecoder::ComputePlaybackRate()
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mResource);
 
   int64_t length = mResource->GetLength();
-  if (!IsNaN(mDuration) && !mozilla::IsInfinite<double>(mDuration) && length >= 0) {
+  if (!IsNaN(mDuration) && !mozilla::IsInfinite<double>(mDuration)
+      && length >= 0) {
     mPlaybackRateReliable = true;
     mPlaybackBytesPerSecond = length / mDuration;
     return;
   }
 
   bool reliable = false;
   mPlaybackBytesPerSecond = mPlaybackStatistics->GetRateAtLastStop(&reliable);
   mPlaybackRateReliable = reliable;
@@ -1194,17 +1208,18 @@ MediaDecoder::ChangeState(PlayState aSta
 }
 
 void
 MediaDecoder::UpdateLogicalPositionInternal()
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
 
-  double currentPosition = static_cast<double>(CurrentPosition()) / static_cast<double>(USECS_PER_S);
+  double currentPosition =
+    static_cast<double>(CurrentPosition()) / static_cast<double>(USECS_PER_S);
   if (mPlayState == PLAY_STATE_ENDED) {
     currentPosition = std::max(currentPosition, mDuration);
   }
   bool logicalPositionChanged = mLogicalPosition != currentPosition;
   mLogicalPosition = currentPosition;
 
   // Invalidate the frame so any video data is displayed.
   // Do this before the timeupdate event so that if that
@@ -1238,18 +1253,19 @@ MediaDecoder::DurationChanged()
 
   DECODER_LOG("Duration changed to %f", mDuration);
 
   // Duration has changed so we should recompute playback rate
   UpdatePlaybackRate();
 
   // See https://www.w3.org/Bugs/Public/show_bug.cgi?id=28822 for a discussion
   // of whether we should fire durationchange on explicit infinity.
-  if (mFiredMetadataLoaded &&
-      (!mozilla::IsInfinite<double>(mDuration) || mExplicitDuration.Ref().isSome())) {
+  if (mFiredMetadataLoaded
+      && (!mozilla::IsInfinite<double>(mDuration)
+          || mExplicitDuration.Ref().isSome())) {
     GetOwner()->DispatchAsyncEvent(NS_LITERAL_STRING("durationchange"));
   }
 
   if (CurrentPosition() > TimeUnit::FromSeconds(mDuration).ToMicroseconds()) {
     Seek(mDuration, SeekTarget::Accurate);
   }
 }
 
@@ -1277,18 +1293,20 @@ MediaDecoder::UpdateEstimatedMediaDurati
   if (mPlayState <= PLAY_STATE_LOADING) {
     return;
   }
 
   // The duration is only changed if it's significantly different from
   // the current estimate, as the incoming duration is an estimate and so
   // often is unstable as more data is read and the estimate is updated.
   // Can result in a durationchange event. aDuration is in microseconds.
-  if (mEstimatedDuration.Ref().isSome() &&
-      mozilla::Abs(mEstimatedDuration.Ref().ref().ToMicroseconds() - aDuration) < ESTIMATED_DURATION_FUZZ_FACTOR_USECS) {
+  if (mEstimatedDuration.Ref().isSome()
+      && mozilla::Abs(mEstimatedDuration.Ref().ref().ToMicroseconds()
+                      - aDuration)
+         < ESTIMATED_DURATION_FUZZ_FACTOR_USECS) {
     return;
   }
 
   mEstimatedDuration = Some(TimeUnit::FromMicroseconds(aDuration));
 }
 
 bool
 MediaDecoder::IsTransportSeekable()
@@ -1322,28 +1340,29 @@ MediaDecoder::GetSeekable()
     return GetBuffered();
   } else if (!IsMediaSeekable()) {
     return media::TimeIntervals();
   } else if (!IsTransportSeekable()) {
     return GetBuffered();
   } else {
     return media::TimeIntervals(
       media::TimeInterval(media::TimeUnit::FromMicroseconds(0),
-                          IsInfinite() ?
-                            media::TimeUnit::FromInfinity() :
-                            media::TimeUnit::FromSeconds(GetDuration())));
+                          IsInfinite()
+                          ? media::TimeUnit::FromInfinity()
+                          : media::TimeUnit::FromSeconds(GetDuration())));
   }
 }
 
 void
 MediaDecoder::SetFragmentEndTime(double aTime)
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (mDecoderStateMachine) {
-    mDecoderStateMachine->DispatchSetFragmentEndTime(static_cast<int64_t>(aTime * USECS_PER_S));
+    mDecoderStateMachine->DispatchSetFragmentEndTime(
+      static_cast<int64_t>(aTime * USECS_PER_S));
   }
 }
 
 void
 MediaDecoder::Suspend()
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (mResource) {
@@ -1438,17 +1457,18 @@ MediaDecoder::SetStateMachine(MediaDecod
   } else {
     DisconnectMirrors();
   }
 }
 
 ImageContainer*
 MediaDecoder::GetImageContainer()
 {
-  return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer() : nullptr;
+  return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer()
+                              : nullptr;
 }
 
 void
 MediaDecoder::InvalidateWithFlags(uint32_t aFlags)
 {
   if (mVideoFrameContainer) {
     mVideoFrameContainer->InvalidateWithFlags(aFlags);
   }
@@ -1460,56 +1480,63 @@ MediaDecoder::Invalidate()
   if (mVideoFrameContainer) {
     mVideoFrameContainer->Invalidate();
   }
 }
 
 // Constructs the time ranges representing what segments of the media
 // are buffered and playable.
 media::TimeIntervals
-MediaDecoder::GetBuffered() {
+MediaDecoder::GetBuffered()
+{
   MOZ_ASSERT(NS_IsMainThread());
   return mBuffered.Ref();
 }
 
 size_t
-MediaDecoder::SizeOfVideoQueue() {
+MediaDecoder::SizeOfVideoQueue()
+{
   MOZ_ASSERT(NS_IsMainThread());
   if (mDecoderStateMachine) {
     return mDecoderStateMachine->SizeOfVideoQueue();
   }
   return 0;
 }
 
 size_t
-MediaDecoder::SizeOfAudioQueue() {
+MediaDecoder::SizeOfAudioQueue()
+{
   MOZ_ASSERT(NS_IsMainThread());
   if (mDecoderStateMachine) {
     return mDecoderStateMachine->SizeOfAudioQueue();
   }
   return 0;
 }
 
-void MediaDecoder::AddSizeOfResources(ResourceSizes* aSizes) {
+void MediaDecoder::AddSizeOfResources(ResourceSizes* aSizes)
+{
   MOZ_ASSERT(NS_IsMainThread());
   if (GetResource()) {
-    aSizes->mByteSize += GetResource()->SizeOfIncludingThis(aSizes->mMallocSizeOf);
+    aSizes->mByteSize +=
+      GetResource()->SizeOfIncludingThis(aSizes->mMallocSizeOf);
   }
 }
 
 void
-MediaDecoder::NotifyDataArrived() {
+MediaDecoder::NotifyDataArrived()
+{
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
   mDataArrivedEvent.Notify();
 }
 
 // Provide access to the state machine object
 MediaDecoderStateMachine*
-MediaDecoder::GetStateMachine() const {
+MediaDecoder::GetStateMachine() const
+{
   MOZ_ASSERT(NS_IsMainThread());
   return mDecoderStateMachine;
 }
 
 void
 MediaDecoder::FireTimeUpdate()
 {
   MOZ_ASSERT(NS_IsMainThread());
@@ -1587,38 +1614,37 @@ MediaDecoder::IsWebMEnabled()
 {
   return Preferences::GetBool("media.webm.enabled");
 }
 
 #ifdef MOZ_ANDROID_OMX
 bool
 MediaDecoder::IsAndroidMediaPluginEnabled()
 {
-  return AndroidBridge::Bridge() &&
-         AndroidBridge::Bridge()->GetAPIVersion() < 16 &&
-         Preferences::GetBool("media.plugins.enabled");
+  return AndroidBridge::Bridge()
+         && AndroidBridge::Bridge()->GetAPIVersion() < 16
+         && Preferences::GetBool("media.plugins.enabled");
 }
 #endif
 
 NS_IMETHODIMP
 MediaMemoryTracker::CollectReports(nsIHandleReportCallback* aHandleReport,
                                    nsISupports* aData, bool aAnonymize)
 {
-  int64_t video = 0, audio = 0;
-
   // NB: When resourceSizes' ref count goes to 0, the promise will report the
   //     resources' memory and finish the asynchronous memory report.
   RefPtr<MediaDecoder::ResourceSizes> resourceSizes =
       new MediaDecoder::ResourceSizes(MediaMemoryTracker::MallocSizeOf);
 
   nsCOMPtr<nsIHandleReportCallback> handleReport = aHandleReport;
   nsCOMPtr<nsISupports> data = aData;
 
   resourceSizes->Promise()->Then(
-      // Non-DocGroup version of AbstractThread::MainThread is fine for memory report.
+      // Non-DocGroup version of AbstractThread::MainThread is fine for memory
+      // report.
       AbstractThread::MainThread(),
       __func__,
       [handleReport, data] (size_t size) {
         handleReport->Callback(
             EmptyCString(), NS_LITERAL_CSTRING("explicit/media/resources"),
             KIND_HEAP, UNITS_BYTES, size,
             NS_LITERAL_CSTRING("Memory used by media resources including "
                                "streaming buffers, caches, etc."),
@@ -1628,16 +1654,18 @@ MediaMemoryTracker::CollectReports(nsIHa
           do_GetService("@mozilla.org/memory-reporter-manager;1");
 
         if (imgr) {
           imgr->EndReport();
         }
       },
       [] (size_t) { /* unused reject function */ });
 
+  int64_t video = 0;
+  int64_t audio = 0;
   DecodersArray& decoders = Decoders();
   for (size_t i = 0; i < decoders.Length(); ++i) {
     MediaDecoder* decoder = decoders[i];
     video += decoder->SizeOfVideoQueue();
     audio += decoder->SizeOfAudioQueue();
     decoder->AddSizeOfResources(resourceSizes);
   }
 
@@ -1726,21 +1754,23 @@ MediaDecoder::RemoveMediaTracks()
 MediaDecoderOwner::NextFrameStatus
 MediaDecoder::NextFrameBufferedStatus()
 {
   MOZ_ASSERT(NS_IsMainThread());
   // Next frame hasn't been decoded yet.
   // Use the buffered range to consider if we have the next frame available.
   media::TimeUnit currentPosition =
     media::TimeUnit::FromMicroseconds(CurrentPosition());
-  media::TimeInterval interval(currentPosition,
-                               currentPosition + media::TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
+  media::TimeInterval interval(
+    currentPosition,
+    currentPosition
+    + media::TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
   return GetBuffered().Contains(interval)
-    ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
-    : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
+         ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
+         : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
 }
 
 nsCString
 MediaDecoder::GetDebugInfo()
 {
   return nsPrintfCString(
     "channels=%u rate=%u hasAudio=%d hasVideo=%d mPlayState=%s mdsm=%p",
     mInfo ? mInfo->mAudio.mChannels : 0, mInfo ? mInfo->mAudio.mRate : 0,
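
Several hunks above rewrap mWatchManager.Watch() calls; the underlying pattern
is that each watched state variable re-runs a member callback on the owning
thread whenever it changes. A self-contained sketch of that pattern, assuming
the Watchable/WatchManager API from mozilla/StateWatching.h (MyClass and mFoo
are hypothetical):

    #include "mozilla/StateWatching.h"

    class MyClass
    {
      Watchable<bool> mFoo;
      WatchManager<MyClass> mWatchManager;

    public:
      MyClass()
        : mFoo(false, "MyClass::mFoo")
        , mWatchManager(this, AbstractThread::MainThread())
      {
        // Re-run FooChanged() whenever mFoo is assigned a new value.
        mWatchManager.Watch(mFoo, &MyClass::FooChanged);
      }

      void FooChanged() { /* react to the new value of mFoo */ }
    };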
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -57,17 +57,18 @@ enum class MediaEventType : int8_t;
 #undef GetCurrentTime
 #endif
 
 class MediaDecoder : public AbstractMediaDecoder
 {
 public:
   // Used to register with MediaResource to receive notifications which will
   // be forwarded to MediaDecoder.
-  class ResourceCallback : public MediaResourceCallback {
+  class ResourceCallback : public MediaResourceCallback
+  {
     // Throttle calls to MediaDecoder::NotifyDataArrived()
     // to be at most once per 500ms.
     static const uint32_t sDelay = 500;
 
   public:
     explicit ResourceCallback(AbstractThread* aMainThread);
     // Start to receive notifications from ResourceCallback.
     void Connect(MediaDecoder* aDecoder);
@@ -91,22 +92,25 @@ public:
 
     // The decoder to send notifications. Main-thread only.
     MediaDecoder* mDecoder = nullptr;
     nsCOMPtr<nsITimer> mTimer;
     bool mTimerArmed = false;
     const RefPtr<AbstractThread> mAbstractMainThread;
   };
 
-  typedef MozPromise<bool /* aIgnored */, bool /* aIgnored */, /* IsExclusive = */ true> SeekPromise;
+  typedef MozPromise<bool /* aIgnored */, bool /* aIgnored */,
+                     /* IsExclusive = */ true>
+    SeekPromise;
 
   NS_DECL_THREADSAFE_ISUPPORTS
 
   // Enumeration for the valid play states (see mPlayState)
-  enum PlayState {
+  enum PlayState
+  {
     PLAY_STATE_START,
     PLAY_STATE_LOADING,
     PLAY_STATE_PAUSED,
     PLAY_STATE_PLAYING,
     PLAY_STATE_ENDED,
     PLAY_STATE_SHUTDOWN
   };
 
@@ -202,17 +206,18 @@ public:
   // is used as the input for each ProcessedMediaStream created by calls to
   // captureStream(UntilEnded). Seeking creates a new source stream, as does
   // replaying after the input has ended. In the latter case, the new source is
   // not connected to streams created by captureStreamUntilEnded.
 
   // Add an output stream. All decoder output will be sent to the stream.
   // The stream is initially blocked. The decoder is responsible for unblocking
   // it while it is playing back.
-  virtual void AddOutputStream(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
+  virtual void AddOutputStream(ProcessedMediaStream* aStream,
+                               bool aFinishWhenEnded);
   // Remove an output stream added with AddOutputStream.
   virtual void RemoveOutputStream(MediaStream* aStream);
 
   // Return the duration of the video in seconds.
   virtual double GetDuration();
 
   // Return true if the stream is infinite (see SetInfinite).
   bool IsInfinite() const;
@@ -425,17 +430,19 @@ private:
 
   MediaDecoderOwner* GetOwner() const override;
 
   AbstractThread* AbstractMainThread() const final override
   {
     return mAbstractMainThread;
   }
 
-  typedef MozPromise<RefPtr<CDMProxy>, bool /* aIgnored */, /* IsExclusive = */ true> CDMProxyPromise;
+  typedef MozPromise<RefPtr<CDMProxy>, bool /* aIgnored */,
+                     /* IsExclusive = */ true>
+    CDMProxyPromise;
 
   // Resolved when a CDMProxy is available and the capabilities are known or
   // rejected when this decoder is about to shut down.
   RefPtr<CDMProxyPromise> RequestCDMProxy() const;
 
   void SetCDMProxy(CDMProxy* aProxy);
 
   void EnsureTelemetryReported();
@@ -471,22 +478,25 @@ private:
 
   void UpdateReadyState()
   {
     MOZ_ASSERT(NS_IsMainThread());
     MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
     GetOwner()->UpdateReadyState();
   }
 
-  virtual MediaDecoderOwner::NextFrameStatus NextFrameStatus() { return mNextFrameStatus; }
+  virtual MediaDecoderOwner::NextFrameStatus NextFrameStatus()
+  {
+    return mNextFrameStatus;
+  }
   virtual MediaDecoderOwner::NextFrameStatus NextFrameBufferedStatus();
 
   // Returns a string describing the state of the media player internal
   // data. Used for debugging purposes.
-  virtual void GetMozDebugReaderData(nsACString& aString) {}
+  virtual void GetMozDebugReaderData(nsACString& aString) { }
 
   virtual void DumpDebugInfo();
 
   using DebugInfoPromise = MozPromise<nsCString, bool, true>;
   RefPtr<DebugInfoPromise> RequestDebugInfo();
 
 protected:
   virtual ~MediaDecoder();
@@ -782,55 +792,56 @@ protected:
   // back again.
   Canonical<int64_t> mDecoderPosition;
 
   // True if the decoder is visible.
   Canonical<bool> mIsVisible;
 
 public:
   AbstractCanonical<media::NullableTimeUnit>* CanonicalDurationOrNull() override;
-  AbstractCanonical<double>* CanonicalVolume() {
-    return &mVolume;
-  }
-  AbstractCanonical<bool>* CanonicalPreservesPitch() {
+  AbstractCanonical<double>* CanonicalVolume() { return &mVolume; }
+  AbstractCanonical<bool>* CanonicalPreservesPitch()
+  {
     return &mPreservesPitch;
   }
-  AbstractCanonical<media::NullableTimeUnit>* CanonicalEstimatedDuration() {
+  AbstractCanonical<media::NullableTimeUnit>* CanonicalEstimatedDuration()
+  {
     return &mEstimatedDuration;
   }
-  AbstractCanonical<Maybe<double>>* CanonicalExplicitDuration() {
+  AbstractCanonical<Maybe<double>>* CanonicalExplicitDuration()
+  {
     return &mExplicitDuration;
   }
-  AbstractCanonical<PlayState>* CanonicalPlayState() {
-    return &mPlayState;
-  }
-  AbstractCanonical<PlayState>* CanonicalNextPlayState() {
-    return &mNextState;
-  }
-  AbstractCanonical<bool>* CanonicalLogicallySeeking() {
+  AbstractCanonical<PlayState>* CanonicalPlayState() { return &mPlayState; }
+  AbstractCanonical<PlayState>* CanonicalNextPlayState() { return &mNextState; }
+  AbstractCanonical<bool>* CanonicalLogicallySeeking()
+  {
     return &mLogicallySeeking;
   }
-  AbstractCanonical<bool>* CanonicalSameOriginMedia() {
+  AbstractCanonical<bool>* CanonicalSameOriginMedia()
+  {
     return &mSameOriginMedia;
   }
-  AbstractCanonical<PrincipalHandle>* CanonicalMediaPrincipalHandle() {
+  AbstractCanonical<PrincipalHandle>* CanonicalMediaPrincipalHandle()
+  {
     return &mMediaPrincipalHandle;
   }
-  AbstractCanonical<double>* CanonicalPlaybackBytesPerSecond() {
+  AbstractCanonical<double>* CanonicalPlaybackBytesPerSecond()
+  {
     return &mPlaybackBytesPerSecond;
   }
-  AbstractCanonical<bool>* CanonicalPlaybackRateReliable() {
+  AbstractCanonical<bool>* CanonicalPlaybackRateReliable()
+  {
     return &mPlaybackRateReliable;
   }
-  AbstractCanonical<int64_t>* CanonicalDecoderPosition() {
+  AbstractCanonical<int64_t>* CanonicalDecoderPosition()
+  {
     return &mDecoderPosition;
   }
-  AbstractCanonical<bool>* CanonicalIsVisible() {
-    return &mIsVisible;
-  }
+  AbstractCanonical<bool>* CanonicalIsVisible() { return &mIsVisible; }
 
 private:
   // Notify owner when the audible state changed
   void NotifyAudibleStateChanged();
 
   /* Functions called by ResourceCallback */
 
   // A media stream is assumed to be infinite if the metadata doesn't
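
The Canonical* accessors consolidated above exist so that off-main-thread
consumers can hold Mirror<T> copies of decoder state. A hedged sketch of the
connection step, assuming the Mirror(thread, initial, name) constructor from
mozilla/StateMirroring.h and a hypothetical 'mTaskQueue':

    // A state-machine-side mirror of the decoder's canonical volume.
    Mirror<double> mVolume(mTaskQueue, 1.0, "Sketch::mVolume (Mirror)");
    // After connecting, updates to the canonical value propagate to the mirror.
    mVolume.Connect(aDecoder->CanonicalVolume());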
--- a/dom/media/MediaDecoderReader.h
+++ b/dom/media/MediaDecoderReader.h
@@ -24,57 +24,61 @@
 
 namespace mozilla {
 
 class CDMProxy;
 class MediaDecoderReader;
 
 struct WaitForDataRejectValue
 {
-  enum Reason {
+  enum Reason
+  {
     SHUTDOWN,
     CANCELED
   };
 
   WaitForDataRejectValue(MediaData::Type aType, Reason aReason)
-    :mType(aType), mReason(aReason) {}
+    :mType(aType), mReason(aReason)
+  {
+  }
   MediaData::Type mType;
   Reason mReason;
 };
 
 struct SeekRejectValue
 {
   MOZ_IMPLICIT SeekRejectValue(const MediaResult& aError)
-    : mType(MediaData::NULL_DATA), mError(aError) {}
+    : mType(MediaData::NULL_DATA), mError(aError) { }
   MOZ_IMPLICIT SeekRejectValue(nsresult aResult)
-    : mType(MediaData::NULL_DATA), mError(aResult) {}
+    : mType(MediaData::NULL_DATA), mError(aResult) { }
   SeekRejectValue(MediaData::Type aType, const MediaResult& aError)
-    : mType(aType), mError(aError) {}
+    : mType(aType), mError(aError) { }
   MediaData::Type mType;
   MediaResult mError;
 };
 
 class MetadataHolder
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MetadataHolder)
   MediaInfo mInfo;
   nsAutoPtr<MetadataTags> mTags;
 
 private:
-  virtual ~MetadataHolder() {}
+  virtual ~MetadataHolder() { }
 };
 
 // Encapsulates the decoding and reading of media data. Reading can either be
 // synchronous and done on the calling "decode" thread, or asynchronous and
 // performed on a background thread, with the result being returned by
 // callback.
 // Unless otherwise specified, methods and fields of this class can only
 // be accessed on the decode task queue.
-class MediaDecoderReader {
+class MediaDecoderReader
+{
   friend class ReRequestVideoWithSkipTask;
   friend class ReRequestAudioTask;
 
   static const bool IsExclusive = true;
 
 public:
   using TrackSet = EnumSet<TrackInfo::TrackType>;
 
@@ -99,17 +103,17 @@ public:
 
   // Initializes the reader, returns NS_OK on success, or NS_ERROR_FAILURE
   // on failure.
   nsresult Init();
 
   // Called by MDSM in dormant state to release resources allocated by this
   // reader. The reader can resume decoding by calling Seek() to a specific
   // position.
-  virtual void ReleaseResources() {}
+  virtual void ReleaseResources() { }
 
   // Destroys the decoding state. The reader cannot be made usable again.
   // This is different from ReleaseResources() as it is irreversible,
   // whereas ReleaseResources() is reversible. Must be called on the decode
   // thread.
   virtual RefPtr<ShutdownPromise> Shutdown();
 
   virtual bool OnTaskQueue() const
@@ -123,18 +127,19 @@ public:
   // decoder must not call any of the callbacks for outstanding
   // Request*Data() calls after this is called. Calls to Request*Data()
   // made after this should be processed as usual.
   //
   // Normally this call precedes a Seek() call, or shutdown.
   //
   // aParam is a set of TrackInfo::TrackType enums specifying which
   // queues need to be reset, defaulting to both audio and video tracks.
-  virtual nsresult ResetDecode(TrackSet aTracks = TrackSet(TrackInfo::kAudioTrack,
-                                                           TrackInfo::kVideoTrack));
+  virtual nsresult ResetDecode(
+    TrackSet aTracks = TrackSet(TrackInfo::kAudioTrack,
+                                TrackInfo::kVideoTrack));
 
   // Requests one audio sample from the reader.
   //
   // The decode should be performed asynchronously, and the promise should
   // be resolved when it is complete.
   virtual RefPtr<MediaDataPromise> RequestAudioData();
 
   // Requests one video sample from the reader.
@@ -253,17 +258,17 @@ public:
   MediaEventProducer<TrackInfo::TrackType>& OnTrackWaitingForKeyProducer()
   {
     return mOnTrackWaitingForKey;
   }
 
   // Switch the video decoder to BlankDecoderModule. It might take effect
   // a few samples later, depending on how many demuxed samples are already
   // queued in the original video decoder.
-  virtual void SetVideoBlankDecode(bool aIsBlankDecode) {}
+  virtual void SetVideoBlankDecode(bool aIsBlankDecode) { }
 
 protected:
   virtual ~MediaDecoderReader();
 
   // Recomputes mBuffered.
   virtual void UpdateBuffered();
 
   RefPtr<MediaDataPromise> DecodeToFirstVideoData();
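
As the class comment notes, reads are asynchronous and promise-based. A hedged
sketch of consuming RequestAudioData(), assuming the reader's 'taskQueue' and
the RefPtr<MediaData>-resolving MediaDataPromise:

    reader->RequestAudioData()->Then(
      taskQueue, __func__,
      [](RefPtr<MediaData> aAudio) { /* enqueue the decoded audio sample */ },
      [](const MediaResult& aError) { /* decode error or end of stream */ });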
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -134,34 +134,37 @@ static const int64_t LOW_DATA_THRESHOLD_
 static_assert(LOW_DATA_THRESHOLD_USECS > AMPLE_AUDIO_USECS,
               "LOW_DATA_THRESHOLD_USECS is too small");
 
 } // namespace detail
 
 // Amount of excess usecs of data to add into the "should we buffer" calculation.
 static const uint32_t EXHAUSTED_DATA_MARGIN_USECS = 100000;
 
-static int64_t DurationToUsecs(TimeDuration aDuration) {
+static int64_t DurationToUsecs(TimeDuration aDuration)
+{
   return static_cast<int64_t>(aDuration.ToSeconds() * USECS_PER_S);
 }
 
 static const uint32_t MIN_VIDEO_QUEUE_SIZE = 3;
 static const uint32_t MAX_VIDEO_QUEUE_SIZE = 10;
 #ifdef MOZ_APPLEMEDIA
 static const uint32_t HW_VIDEO_QUEUE_SIZE = 10;
 #else
 static const uint32_t HW_VIDEO_QUEUE_SIZE = 3;
 #endif
 static const uint32_t VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE = 9999;
 
 static uint32_t sVideoQueueDefaultSize = MAX_VIDEO_QUEUE_SIZE;
 static uint32_t sVideoQueueHWAccelSize = HW_VIDEO_QUEUE_SIZE;
-static uint32_t sVideoQueueSendToCompositorSize = VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE;
-
-static void InitVideoQueuePrefs() {
+static uint32_t sVideoQueueSendToCompositorSize =
+  VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE;
+
+static void InitVideoQueuePrefs()
+{
   MOZ_ASSERT(NS_IsMainThread());
   static bool sPrefInit = false;
   if (!sPrefInit) {
     sPrefInit = true;
     sVideoQueueDefaultSize = Preferences::GetUint(
       "media.video-queue.default-size", MAX_VIDEO_QUEUE_SIZE);
     sVideoQueueHWAccelSize = Preferences::GetUint(
       "media.video-queue.hw-accel-size", HW_VIDEO_QUEUE_SIZE);
@@ -177,24 +180,24 @@ SuspendBackgroundVideoDelay()
 {
   return TimeDuration::FromMilliseconds(
     MediaPrefs::MDSMSuspendBackgroundVideoDelay());
 }
 
 class MediaDecoderStateMachine::StateObject
 {
 public:
-  virtual ~StateObject() {}
-  virtual void Exit() {};  // Exit action.
-  virtual void Step() {}   // Perform a 'cycle' of this state object.
+  virtual ~StateObject() { }
+  virtual void Exit() { }   // Exit action.
+  virtual void Step() { }   // Perform a 'cycle' of this state object.
   virtual State GetState() const = 0;
 
   // Event handlers for various events.
-  virtual void HandleCDMProxyReady() {}
-  virtual void HandleAudioCaptured() {}
+  virtual void HandleCDMProxyReady() { }
+  virtual void HandleAudioCaptured() { }
   virtual void HandleAudioDecoded(MediaData* aAudio)
   {
     Crash("Unexpected event!", __func__);
   }
   virtual void HandleVideoDecoded(MediaData* aVideo, TimeStamp aDecodeStart)
   {
     Crash("Unexpected event!", __func__);
   }
@@ -245,17 +248,18 @@ public:
 
 private:
   template <class S, typename R, typename... As>
   auto ReturnTypeHelper(R(S::*)(As...)) -> R;
 
   void Crash(const char* aReason, const char* aSite)
   {
     char buf[1024];
-    SprintfLiteral(buf, "%s state=%s callsite=%s", aReason, ToStateStr(GetState()), aSite);
+    SprintfLiteral(buf, "%s state=%s callsite=%s", aReason,
+                   ToStateStr(GetState()), aSite);
     MOZ_ReportAssertionFailure(buf, __FILE__, __LINE__);
     MOZ_CRASH();
   }
 
 protected:
   enum class EventVisibility : int8_t
   {
     Observable,
@@ -267,37 +271,37 @@ protected:
   TaskQueue* OwnerThread() const { return mMaster->mTaskQueue; }
   MediaResource* Resource() const { return mMaster->mResource; }
   MediaDecoderReaderWrapper* Reader() const { return mMaster->mReader; }
   const MediaInfo& Info() const { return mMaster->Info(); }
   bool IsExpectingMoreData() const
   {
     // We are expecting more data if either the resource states so, or if we
     // have a waiting promise pending (such as with non-MSE EME).
-    return Resource()->IsExpectingMoreData() ||
-           mMaster->IsWaitingAudioData() ||
-           mMaster->IsWaitingVideoData();
+    return Resource()->IsExpectingMoreData()
+           || mMaster->IsWaitingAudioData()
+           || mMaster->IsWaitingVideoData();
   }
   MediaQueue<MediaData>& AudioQueue() const { return mMaster->mAudioQueue; }
   MediaQueue<MediaData>& VideoQueue() const { return mMaster->mVideoQueue; }
 
   // Note this function will delete the current state object.
   // Don't access members to avoid UAF after this call.
   template <class S, typename... Ts>
   auto SetState(Ts... aArgs)
     -> decltype(ReturnTypeHelper(&S::Enter))
   {
     // Keep mMaster in a local object because mMaster will become invalid after
     // the current state object is deleted.
     auto master = mMaster;
 
     auto s = new S(master);
 
-    MOZ_ASSERT(GetState() != s->GetState() ||
-               GetState() == DECODER_STATE_SEEKING);
+    MOZ_ASSERT(GetState() != s->GetState()
+               || GetState() == DECODER_STATE_SEEKING);
 
     SLOG("change state to: %s", ToStateStr(s->GetState()));
 
     Exit();
 
     master->mStateObj.reset(s);
     return s->Enter(Move(aArgs)...);
   }
@@ -317,17 +321,17 @@ protected:
  *   SHUTDOWN if failing to decode metadata.
  *   WAIT_FOR_CDM if the media is encrypted and CDM is not available.
  *   DECODING_FIRSTFRAME otherwise.
  */
 class MediaDecoderStateMachine::DecodeMetadataState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit DecodeMetadataState(Master* aPtr) : StateObject(aPtr) {}
+  explicit DecodeMetadataState(Master* aPtr) : StateObject(aPtr) { }
 
   void Enter()
   {
     MOZ_ASSERT(!mMaster->mVideoDecodeSuspended);
     MOZ_ASSERT(!mMetadataRequest.Exists());
     SLOG("Dispatching AsyncReadMetadata");
 
     // Set mode to METADATA since we are about to read metadata.
@@ -392,17 +396,17 @@ private:
  * Transition to other states when CDM is ready:
  *   SEEKING if any pending seek request.
  *   DECODING_FIRSTFRAME otherwise.
  */
 class MediaDecoderStateMachine::WaitForCDMState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit WaitForCDMState(Master* aPtr) : StateObject(aPtr) {}
+  explicit WaitForCDMState(Master* aPtr) : StateObject(aPtr) { }
 
   void Enter()
   {
     MOZ_ASSERT(!mMaster->mVideoDecodeSuspended);
   }
 
   void Exit() override
   {
@@ -446,32 +450,32 @@ private:
  *
  * Transition to:
  *   SEEKING if any seek request or play state changes to PLAYING.
  */
 class MediaDecoderStateMachine::DormantState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit DormantState(Master* aPtr) : StateObject(aPtr) {}
+  explicit DormantState(Master* aPtr) : StateObject(aPtr) { }
 
   void Enter()
   {
     if (mMaster->IsPlaying()) {
       mMaster->StopPlayback();
     }
 
     // Calculate the position to seek to when exiting dormant.
-    auto t = mMaster->mMediaSink->IsStarted()
-      ? mMaster->GetClock()
-      : mMaster->GetMediaTime();
+    auto t = mMaster->mMediaSink->IsStarted() ? mMaster->GetClock()
+                                              : mMaster->GetMediaTime();
     mPendingSeek.mTarget.emplace(t, SeekTarget::Accurate);
     // SeekJob asserts |mTarget.IsValid() == !mPromise.IsEmpty()| so we
     // need to create the promise even it is not used at all.
-    RefPtr<MediaDecoder::SeekPromise> x = mPendingSeek.mPromise.Ensure(__func__);
+    RefPtr<MediaDecoder::SeekPromise> x =
+      mPendingSeek.mPromise.Ensure(__func__);
 
     mMaster->ResetDecode();
     mMaster->StopMediaSink();
     mMaster->mReader->ReleaseResources();
   }
 
   void Exit() override
   {
@@ -508,17 +512,17 @@ private:
  *   SHUTDOWN if any decode error.
  *   SEEKING if any seek request.
  *   DECODING when the 'loadeddata' event is fired.
  */
 class MediaDecoderStateMachine::DecodingFirstFrameState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit DecodingFirstFrameState(Master* aPtr) : StateObject(aPtr) {}
+  explicit DecodingFirstFrameState(Master* aPtr) : StateObject(aPtr) { }
 
   void Enter();
 
   State GetState() const override
   {
     return DECODER_STATE_DECODING_FIRSTFRAME;
   }
 
@@ -573,17 +577,18 @@ public:
 
   void HandleVideoWaited(MediaData::Type aType) override
   {
     mMaster->RequestVideoData(false, media::TimeUnit());
   }
 
   void HandleVideoSuspendTimeout() override
   {
-    // Do nothing for we need to decode the 1st video frame to get the dimensions.
+    // Do nothing, since we need to decode the 1st video frame to get the
+    // dimensions.
   }
 
   void HandleResumeVideoDecoding() override
   {
     // We never suspend video decoding in this state.
     MOZ_ASSERT(false, "Shouldn't have suspended video decoding.");
   }
 
@@ -623,32 +628,32 @@ public:
     }
     mDormantTimer.Reset();
     mOnAudioPopped.DisconnectIfExists();
     mOnVideoPopped.DisconnectIfExists();
   }
 
   void Step() override
   {
-    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING &&
-        mMaster->IsPlaying()) {
+    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING
+        && mMaster->IsPlaying()) {
       // We're playing, but the element/decoder is in paused state. Stop
       // playing!
       mMaster->StopPlayback();
     }
 
     // Start playback if necessary so that the clock can be properly queried.
     if (!mIsPrerolling) {
       mMaster->MaybeStartPlayback();
     }
 
     mMaster->UpdatePlaybackPositionPeriodically();
 
-    MOZ_ASSERT(!mMaster->IsPlaying() ||
-               mMaster->IsStateMachineScheduled(),
+    MOZ_ASSERT(!mMaster->IsPlaying()
+               || mMaster->IsStateMachineScheduled(),
                "Must have timer scheduled");
 
     MaybeStartBuffering();
   }
 
   State GetState() const override
   {
     return DECODER_STATE_DECODING;
@@ -760,18 +765,18 @@ private:
     // since they decode audio and video on different threads so they
     // are unlikely to run out of decoded audio.
     if (Reader()->IsAsync()) {
       return;
     }
 
     TimeDuration decodeTime = TimeStamp::Now() - aDecodeStart;
     int64_t adjustedTime = THRESHOLD_FACTOR * DurationToUsecs(decodeTime);
-    if (adjustedTime > mMaster->mLowAudioThresholdUsecs &&
-        !mMaster->HasLowBufferedData())
+    if (adjustedTime > mMaster->mLowAudioThresholdUsecs
+        && !mMaster->HasLowBufferedData())
     {
       mMaster->mLowAudioThresholdUsecs =
         std::min(adjustedTime, mMaster->mAmpleAudioThresholdUsecs);
 
       mMaster->mAmpleAudioThresholdUsecs =
         std::max(THRESHOLD_FACTOR * mMaster->mLowAudioThresholdUsecs,
                  mMaster->mAmpleAudioThresholdUsecs);
 
@@ -780,33 +785,33 @@ private:
            "mAmpleAudioThresholdUsecs=%lld",
            mMaster->mLowAudioThresholdUsecs,
            mMaster->mAmpleAudioThresholdUsecs);
     }
   }
 
   bool DonePrerollingAudio()
   {
-    return !mMaster->IsAudioDecoding() ||
-           mMaster->GetDecodedAudioDuration() >=
-             mMaster->AudioPrerollUsecs() * mMaster->mPlaybackRate;
+    return !mMaster->IsAudioDecoding()
+           || mMaster->GetDecodedAudioDuration()
+              >= mMaster->AudioPrerollUsecs() * mMaster->mPlaybackRate;
   }
 
   bool DonePrerollingVideo()
   {
-    return !mMaster->IsVideoDecoding() ||
-           static_cast<uint32_t>(mMaster->VideoQueue().GetSize()) >=
-             mMaster->VideoPrerollFrames() * mMaster->mPlaybackRate + 1;
+    return !mMaster->IsVideoDecoding()
+           || static_cast<uint32_t>(mMaster->VideoQueue().GetSize())
+              >= mMaster->VideoPrerollFrames() * mMaster->mPlaybackRate + 1;
   }
 
   void MaybeStopPrerolling()
   {
-    if (mIsPrerolling &&
-        (DonePrerollingAudio() || mMaster->IsWaitingAudioData()) &&
-        (DonePrerollingVideo() || mMaster->IsWaitingVideoData())) {
+    if (mIsPrerolling
+        && (DonePrerollingAudio() || mMaster->IsWaitingAudioData())
+        && (DonePrerollingVideo() || mMaster->IsWaitingVideoData())) {
       mIsPrerolling = false;
       // Check if we can start playback.
       mMaster->ScheduleStateMachine();
     }
   }
 
   void StartDormantTimer()
   {
@@ -870,17 +875,17 @@ private:
  *   SHUTDOWN if seek failed.
  *   COMPLETED if the new playback position is the end of the media resource.
  *   DECODING otherwise.
  */
 class MediaDecoderStateMachine::SeekingState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit SeekingState(Master* aPtr) : StateObject(aPtr) {}
+  explicit SeekingState(Master* aPtr) : StateObject(aPtr) { }
 
   RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob aSeekJob,
                                           EventVisibility aVisibility)
   {
     mSeekJob = Move(aSeekJob);
 
     // Always switch off the blank decoder; otherwise we might become visible
     // in the middle of seeking and won't have a valid video frame to show
@@ -891,39 +896,42 @@ public:
       Reader()->SetVideoBlankDecode(false);
     }
 
     // Don't stop playback for a video-only seek since audio is playing.
     if (!mSeekJob.mTarget->IsVideoOnly()) {
       mMaster->StopPlayback();
     }
 
-    mMaster->UpdatePlaybackPositionInternal(mSeekJob.mTarget->GetTime().ToMicroseconds());
+    mMaster->UpdatePlaybackPositionInternal(
+      mSeekJob.mTarget->GetTime().ToMicroseconds());
 
     if (aVisibility == EventVisibility::Observable) {
       mMaster->mOnPlaybackEvent.Notify(MediaEventType::SeekStarted);
       // We want dormant actions to be transparent to the user.
       // So we only notify the change when the seek request is from the user.
-      mMaster->UpdateNextFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING);
+      mMaster->UpdateNextFrameStatus(
+        MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING);
     }
 
     DoSeek();
 
     return mSeekJob.mPromise.Ensure(__func__);
   }
 
   virtual void Exit() override = 0;
 
   State GetState() const override
   {
     return DECODER_STATE_SEEKING;
   }
 
   void HandleAudioDecoded(MediaData* aAudio) override = 0;
-  void HandleVideoDecoded(MediaData* aVideo, TimeStamp aDecodeStart) override = 0;
+  void HandleVideoDecoded(MediaData* aVideo,
+                          TimeStamp aDecodeStart) override = 0;
   void HandleAudioWaited(MediaData::Type aType) override = 0;
   void HandleVideoWaited(MediaData::Type aType) override = 0;
 
   void HandleVideoSuspendTimeout() override
   {
     // Do nothing since we want a valid video frame to show when seek is done.
   }
 
@@ -951,39 +959,42 @@ public:
   explicit AccurateSeekingState(Master* aPtr) : SeekingState(aPtr)
   {
   }
 
   RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob aSeekJob,
                                           EventVisibility aVisibility)
   {
     MOZ_ASSERT(aSeekJob.mTarget->IsAccurate() || aSeekJob.mTarget->IsFast());
-    mCurrentTimeBeforeSeek = TimeUnit::FromMicroseconds(mMaster->GetMediaTime());
+    mCurrentTimeBeforeSeek =
+      TimeUnit::FromMicroseconds(mMaster->GetMediaTime());
     return SeekingState::Enter(Move(aSeekJob), aVisibility);
   }
 
   void Exit() override
   {
     // Disconnect MediaDecoder.
     mSeekJob.RejectIfExists(__func__);
 
     // Disconnect MediaDecoderReaderWrapper.
     mSeekRequest.DisconnectIfExists();
 
     mWaitRequest.DisconnectIfExists();
   }
 
   void HandleAudioDecoded(MediaData* aAudio) override
   {
-    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, "Seek shouldn't be finished");
+    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
+               "Seek shouldn't be finished");
     MOZ_ASSERT(aAudio);
 
     // Video-only seek doesn't reset the audio decoder. There might be pending audio
-    // requests when AccurateSeekTask::Seek() begins. We will just store the data
-    // without checking |mDiscontinuity| or calling DropAudioUpToSeekTarget().
+    // requests when AccurateSeekTask::Seek() begins. We will just store the
+    // data without checking |mDiscontinuity| or calling
+    // DropAudioUpToSeekTarget().
     if (mSeekJob.mTarget->IsVideoOnly()) {
       mMaster->PushAudio(aAudio);
       return;
     }
 
     AdjustFastSeekIfNeeded(aAudio);
 
     if (mSeekJob.mTarget->IsFast()) {
@@ -1002,17 +1013,18 @@ public:
       RequestAudioData();
       return;
     }
     MaybeFinishSeek();
   }
 
   void HandleVideoDecoded(MediaData* aVideo, TimeStamp aDecodeStart) override
   {
-    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, "Seek shouldn't be finished");
+    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
+               "Seek shouldn't be finished");
     MOZ_ASSERT(aVideo);
 
     AdjustFastSeekIfNeeded(aVideo);
 
     if (mSeekJob.mTarget->IsFast()) {
       // Non-precise seek. We can stop the seek at the first sample.
       mMaster->PushVideo(aVideo);
       mDoneVideoSeeking = true;
@@ -1079,28 +1091,30 @@ public:
     }
     VideoQueue().Finish();
     mDoneVideoSeeking = true;
     MaybeFinishSeek();
   }
 
   void HandleAudioWaited(MediaData::Type aType) override
   {
-    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, "Seek shouldn't be finished");
+    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
+               "Seek shouldn't be finished");
 
     // Ignore pending requests from video-only seek.
     if (mSeekJob.mTarget->IsVideoOnly()) {
       return;
     }
     RequestAudioData();
   }
 
   void HandleVideoWaited(MediaData::Type aType) override
   {
-    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, "Seek shouldn't be finished");
+    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
+               "Seek shouldn't be finished");
 
     RequestVideoData();
   }
 
 private:
   void DemuxerSeek()
   {
     // Request the demuxer to perform a seek.
@@ -1129,26 +1143,26 @@ private:
 
     DemuxerSeek();
   }
 
   int64_t CalculateNewCurrentTime() const override
   {
     const int64_t seekTime = mSeekJob.mTarget->GetTime().ToMicroseconds();
 
-    // For the accurate seek, we always set the newCurrentTime = seekTime so that
-    // the updated HTMLMediaElement.currentTime will always be the seek target;
-    // we rely on the MediaSink to handles the gap between the newCurrentTime and
-    // the real decoded samples' start time.
+    // For the accurate seek, we always set the newCurrentTime = seekTime so
+    // that the updated HTMLMediaElement.currentTime will always be the seek
+    // target; we rely on the MediaSink to handle the gap between the
+    // newCurrentTime and the real decoded samples' start time.
     if (mSeekJob.mTarget->IsAccurate()) {
       return seekTime;
     }
 
-    // For the fast seek, we update the newCurrentTime with the decoded audio and
-    // video samples, set it to be the one which is closet to the seekTime.
+    // For the fast seek, we update the newCurrentTime with the decoded audio
+    // and video samples, setting it to the one closest to the seekTime.
     if (mSeekJob.mTarget->IsFast()) {
       RefPtr<MediaData> audio = AudioQueue().PeekFront();
       RefPtr<MediaData> video = VideoQueue().PeekFront();
 
       // A situation where both audio and video approach the end.
       if (!audio && !video) {
         return seekTime;
       }
@@ -1159,56 +1173,62 @@ private:
       const int64_t videoGap = std::abs(videoStart - seekTime);
       return audioGap <= videoGap ? audioStart : videoStart;
     }
 
     MOZ_ASSERT(false, "AccurateSeekTask doesn't handle other seek types.");
     return 0;
   }
 
-  void OnSeekResolved(media::TimeUnit) {
+  void OnSeekResolved(media::TimeUnit)
+  {
     mSeekRequest.Complete();
 
     // We must decode the first samples of active streams, so we can determine
     // the new stream time. So dispatch tasks to do that.
     if (!mDoneVideoSeeking) {
       RequestVideoData();
     }
     if (!mDoneAudioSeeking) {
       RequestAudioData();
     }
   }
 
-  void OnSeekRejected(const SeekRejectValue& aReject) {
+  void OnSeekRejected(const SeekRejectValue& aReject)
+  {
     mSeekRequest.Complete();
 
     if (aReject.mError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) {
       SLOG("OnSeekRejected reason=WAITING_FOR_DATA type=%d", aReject.mType);
       MOZ_ASSERT(!mMaster->IsRequestingAudioData());
       MOZ_ASSERT(!mMaster->IsRequestingVideoData());
       MOZ_ASSERT(!mMaster->IsWaitingAudioData());
       MOZ_ASSERT(!mMaster->IsWaitingVideoData());
       // Fire 'waiting' to notify the player that we are waiting for data.
-      mMaster->UpdateNextFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING);
-      Reader()->WaitForData(aReject.mType)->Then(
-        OwnerThread(), __func__,
-        [this] (MediaData::Type aType) {
-          SLOG("OnSeekRejected wait promise resolved");
-          mWaitRequest.Complete();
-          DemuxerSeek();
-        },
-        [this] (const WaitForDataRejectValue& aRejection) {
-          SLOG("OnSeekRejected wait promise rejected");
-          mWaitRequest.Complete();
-          mMaster->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA);
-        })->Track(mWaitRequest);
+      mMaster->UpdateNextFrameStatus(
+        MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING);
+      Reader()
+        ->WaitForData(aReject.mType)
+        ->Then(OwnerThread(), __func__,
+               [this](MediaData::Type aType) {
+                 SLOG("OnSeekRejected wait promise resolved");
+                 mWaitRequest.Complete();
+                 DemuxerSeek();
+               },
+               [this](const WaitForDataRejectValue& aRejection) {
+                 SLOG("OnSeekRejected wait promise rejected");
+                 mWaitRequest.Complete();
+                 mMaster->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA);
+               })
+        ->Track(mWaitRequest);
       return;
     }
 
-    MOZ_ASSERT(NS_FAILED(aReject.mError), "Cancels should also disconnect mSeekRequest");
+    MOZ_ASSERT(NS_FAILED(aReject.mError),
+               "Cancels should also disconnect mSeekRequest");
     mMaster->DecodeError(aReject.mError);
   }
 
   void RequestAudioData()
   {
     MOZ_ASSERT(!mDoneAudioSeeking);
     mMaster->RequestAudioData();
   }
@@ -1216,39 +1236,41 @@ private:
   void RequestVideoData()
   {
     MOZ_ASSERT(!mDoneVideoSeeking);
     mMaster->RequestVideoData(false, media::TimeUnit());
   }
 
   void AdjustFastSeekIfNeeded(MediaData* aSample)
   {
-    if (mSeekJob.mTarget->IsFast() &&
-        mSeekJob.mTarget->GetTime() > mCurrentTimeBeforeSeek &&
-        aSample->mTime < mCurrentTimeBeforeSeek.ToMicroseconds()) {
+    if (mSeekJob.mTarget->IsFast()
+        && mSeekJob.mTarget->GetTime() > mCurrentTimeBeforeSeek
+        && aSample->mTime < mCurrentTimeBeforeSeek.ToMicroseconds()) {
       // We are doing a fastSeek, but we ended up *before* the previous
       // playback position. This is surprising UX, so switch to an accurate
       // seek and decode to the seek target. This is not conformant to the
       // spec, fastSeek should always be fast, but until we get the time to
       // change all Readers to seek to the keyframe after the currentTime
       // in this case, we'll just decode forward. Bug 1026330.
       mSeekJob.mTarget->SetType(SeekTarget::Accurate);
     }
   }
 
   nsresult DropAudioUpToSeekTarget(AudioData* aAudio)
   {
     MOZ_ASSERT(aAudio && mSeekJob.mTarget->IsAccurate());
 
-    CheckedInt64 sampleDuration = FramesToUsecs(aAudio->mFrames, Info().mAudio.mRate);
+    CheckedInt64 sampleDuration =
+      FramesToUsecs(aAudio->mFrames, Info().mAudio.mRate);
     if (!sampleDuration.isValid()) {
       return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     }
 
-    if (aAudio->mTime + sampleDuration.value() <= mSeekJob.mTarget->GetTime().ToMicroseconds()) {
+    if (aAudio->mTime + sampleDuration.value()
+        <= mSeekJob.mTarget->GetTime().ToMicroseconds()) {
       // Our seek target lies after the frames in this AudioData. Don't
       // push it onto the audio queue, and keep decoding forwards.
       return NS_OK;
     }
 
     if (aAudio->mTime > mSeekJob.mTarget->GetTime().ToMicroseconds()) {
       // The seek target doesn't lie in the audio block just after the last
       // audio frames we've seen which were before the seek target. This
@@ -1263,52 +1285,52 @@ private:
       return NS_OK;
     }
 
     // The seek target lies somewhere in this AudioData's frames, strip off
     // any frames which lie before the seek target, so we'll begin playback
     // exactly at the seek target.
     NS_ASSERTION(mSeekJob.mTarget->GetTime().ToMicroseconds() >= aAudio->mTime,
                  "Target must be at or after data start.");
-    NS_ASSERTION(mSeekJob.mTarget->GetTime().ToMicroseconds() < aAudio->mTime + sampleDuration.value(),
+    NS_ASSERTION(mSeekJob.mTarget->GetTime().ToMicroseconds()
+                 < aAudio->mTime + sampleDuration.value(),
                  "Data must end after target.");
 
-    CheckedInt64 framesToPrune =
-      UsecsToFrames(mSeekJob.mTarget->GetTime().ToMicroseconds() - aAudio->mTime, Info().mAudio.mRate);
+    CheckedInt64 framesToPrune = UsecsToFrames(
+      mSeekJob.mTarget->GetTime().ToMicroseconds() - aAudio->mTime,
+      Info().mAudio.mRate);
     if (!framesToPrune.isValid()) {
       return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     }
     if (framesToPrune.value() > aAudio->mFrames) {
       // We've messed up somehow. Don't try to trim frames; the |frames|
       // variable below will overflow.
       SWARN("Can't prune more frames than we have!");
       return NS_ERROR_FAILURE;
     }
-    uint32_t frames = aAudio->mFrames - static_cast<uint32_t>(framesToPrune.value());
+    uint32_t frames =
+      aAudio->mFrames - static_cast<uint32_t>(framesToPrune.value());
     uint32_t channels = aAudio->mChannels;
     AlignedAudioBuffer audioData(frames * channels);
     if (!audioData) {
       return NS_ERROR_OUT_OF_MEMORY;
     }
 
     memcpy(audioData.get(),
            aAudio->mAudioData.get() + (framesToPrune.value() * channels),
            frames * channels * sizeof(AudioDataValue));
     CheckedInt64 duration = FramesToUsecs(frames, Info().mAudio.mRate);
     if (!duration.isValid()) {
       return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     }
-    RefPtr<AudioData> data(new AudioData(aAudio->mOffset,
-                                         mSeekJob.mTarget->GetTime().ToMicroseconds(),
-                                         duration.value(),
-                                         frames,
-                                         Move(audioData),
-                                         channels,
-                                         aAudio->mRate));
-    MOZ_ASSERT(AudioQueue().GetSize() == 0, "Should be the 1st sample after seeking");
+    RefPtr<AudioData> data(new AudioData(
+      aAudio->mOffset, mSeekJob.mTarget->GetTime().ToMicroseconds(),
+      duration.value(), frames, Move(audioData), channels, aAudio->mRate));
+    MOZ_ASSERT(AudioQueue().GetSize() == 0,
+               "Should be the 1st sample after seeking");
     mMaster->PushAudio(data);
     mDoneAudioSeeking = true;
 
     return NS_OK;
   }
 
   nsresult DropVideoUpToSeekTarget(MediaData* aSample)
   {
@@ -1321,26 +1343,28 @@ private:
     // If the frame end time is less than the seek target, we won't want
     // to display this frame after the seek, so discard it.
     if (target >= video->GetEndTime()) {
       SLOG("DropVideoUpToSeekTarget() pop video frame [%lld, %lld] target=%lld",
            video->mTime, video->GetEndTime(), target);
       mFirstVideoFrameAfterSeek = video;
     } else {
       if (target >= video->mTime && video->GetEndTime() >= target) {
-        // The seek target lies inside this frame's time slice. Adjust the frame's
-        // start time to match the seek target.
+        // The seek target lies inside this frame's time slice. Adjust the
+        // frame's start time to match the seek target.
         video->UpdateTimestamp(target);
       }
       mFirstVideoFrameAfterSeek = nullptr;
 
-      SLOG("DropVideoUpToSeekTarget() found video frame [%lld, %lld] containing target=%lld",
-                  video->mTime, video->GetEndTime(), target);
-
-      MOZ_ASSERT(VideoQueue().GetSize() == 0, "Should be the 1st sample after seeking");
+      SLOG("DropVideoUpToSeekTarget() found video frame [%lld, %lld] "
+           "containing target=%lld",
+           video->mTime, video->GetEndTime(), target);
+
+      MOZ_ASSERT(VideoQueue().GetSize() == 0,
+                 "Should be the 1st sample after seeking");
       mMaster->PushVideo(video);
       mDoneVideoSeeking = true;
     }
 
     return NS_OK;
   }
 
   void MaybeFinishSeek()
@@ -1418,18 +1442,18 @@ private:
   {
     auto currentTime = mCurrentTime;
     DiscardFrames(VideoQueue(), [currentTime] (int64_t aSampleTime) {
       return aSampleTime <= currentTime;
     });
 
     if (!NeedMoreVideo()) {
       FinishSeek();
-    } else if (!mMaster->IsRequestingVideoData() &&
-               !mMaster->IsWaitingVideoData()) {
+    } else if (!mMaster->IsRequestingVideoData()
+               && !mMaster->IsWaitingVideoData()) {
       RequestVideoData();
     }
   }
 
   class AysncNextFrameSeekTask : public Runnable
   {
   public:
     explicit AysncNextFrameSeekTask(NextFrameSeekingState* aStateObject)
@@ -1555,22 +1579,23 @@ private:
   void RequestVideoData()
   {
     mMaster->RequestVideoData(false, media::TimeUnit());
   }
 
   bool NeedMoreVideo() const
   {
     // Need to request video when we have none and video queue is not finished.
-    return VideoQueue().GetSize() == 0 &&
-           !VideoQueue().IsFinished();
+    return VideoQueue().GetSize() == 0
+           && !VideoQueue().IsFinished();
   }
 
  // Update the seek target's time before resolving this seek task; the updated
-  // time will be used in the MDSM::SeekCompleted() to update the MDSM's position.
+  // time will be used in the MDSM::SeekCompleted() to update the MDSM's
+  // position.
   void UpdateSeekTargetTime()
   {
     RefPtr<MediaData> data = VideoQueue().PeekFront();
     if (data) {
       mSeekJob.mTarget->SetTime(TimeUnit::FromMicroseconds(data->mTime));
     } else {
       MOZ_ASSERT(VideoQueue().AtEndOfStream());
       mSeekJob.mTarget->SetTime(mDuration);
@@ -1604,34 +1629,37 @@ private:
  *   SHUTDOWN if any decode error.
  *   COMPLETED when having decoded all audio/video data.
  *   DECODING when having decoded enough data to continue playback.
  */
 class MediaDecoderStateMachine::BufferingState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit BufferingState(Master* aPtr) : StateObject(aPtr) {}
+  explicit BufferingState(Master* aPtr) : StateObject(aPtr) { }
 
   void Enter()
   {
     if (mMaster->IsPlaying()) {
       mMaster->StopPlayback();
     }
 
     mBufferingStart = TimeStamp::Now();
 
     MediaStatistics stats = mMaster->GetStatistics();
     SLOG("Playback rate: %.1lfKB/s%s download rate: %.1lfKB/s%s",
-         stats.mPlaybackRate/1024, stats.mPlaybackRateReliable ? "" : " (unreliable)",
-         stats.mDownloadRate/1024, stats.mDownloadRateReliable ? "" : " (unreliable)");
+         stats.mPlaybackRate / 1024,
+         stats.mPlaybackRateReliable ? "" : " (unreliable)",
+         stats.mDownloadRate / 1024,
+         stats.mDownloadRateReliable ? "" : " (unreliable)");
 
     mMaster->ScheduleStateMachineIn(USECS_PER_S);
 
-    mMaster->UpdateNextFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING);
+    mMaster->UpdateNextFrameStatus(
+      MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING);
   }
 
   void Step() override;
 
   State GetState() const override
   {
     return DECODER_STATE_BUFFERING;
   }
@@ -1709,66 +1737,68 @@ private:
  *
  * Transition to:
  *   SEEKING if any seek request.
  */
 class MediaDecoderStateMachine::CompletedState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit CompletedState(Master* aPtr) : StateObject(aPtr) {}
+  explicit CompletedState(Master* aPtr) : StateObject(aPtr) { }
 
   void Enter()
   {
     // We've decoded all samples. We don't need decoders anymore.
     Reader()->ReleaseResources();
 
     bool hasNextFrame = (!mMaster->HasAudio() || !mMaster->mAudioCompleted)
-      && (!mMaster->HasVideo() || !mMaster->mVideoCompleted);
-
-    mMaster->UpdateNextFrameStatus(hasNextFrame
-      ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
-      : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE);
+                        && (!mMaster->HasVideo() || !mMaster->mVideoCompleted);
+
+    mMaster->UpdateNextFrameStatus(
+      hasNextFrame ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
+                   : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE);
 
     Step();
   }
 
   void Exit() override
   {
     mSentPlaybackEndedEvent = false;
   }
 
   void Step() override
   {
-    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING &&
-        mMaster->IsPlaying()) {
+    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING
+        && mMaster->IsPlaying()) {
       mMaster->StopPlayback();
     }
 
     // Play the remaining media. We want to run AdvanceFrame() at least
     // once to ensure the current playback position is advanced to the
     // end of the media, and so that we update the readyState.
-    if ((mMaster->HasVideo() && !mMaster->mVideoCompleted) ||
-        (mMaster->HasAudio() && !mMaster->mAudioCompleted)) {
+    if ((mMaster->HasVideo() && !mMaster->mVideoCompleted)
+        || (mMaster->HasAudio() && !mMaster->mAudioCompleted)) {
       // Start playback if necessary to play the remaining media.
       mMaster->MaybeStartPlayback();
       mMaster->UpdatePlaybackPositionPeriodically();
-      MOZ_ASSERT(!mMaster->IsPlaying() ||
-                 mMaster->IsStateMachineScheduled(),
+      MOZ_ASSERT(!mMaster->IsPlaying()
+                 || mMaster->IsStateMachineScheduled(),
                  "Must have timer scheduled");
       return;
     }
 
     // StopPlayback in order to reset the IsPlaying() state so audio
     // is restarted correctly.
     mMaster->StopPlayback();
 
     if (!mSentPlaybackEndedEvent) {
-      int64_t clockTime = std::max(mMaster->AudioEndTime(), mMaster->VideoEndTime());
-      clockTime = std::max(int64_t(0), std::max(clockTime, mMaster->Duration().ToMicroseconds()));
+      int64_t clockTime =
+        std::max(mMaster->AudioEndTime(), mMaster->VideoEndTime());
+      clockTime = std::max(
+        int64_t(0), std::max(clockTime, mMaster->Duration().ToMicroseconds()));
       mMaster->UpdatePlaybackPosition(clockTime);
 
       // Ensure readyState is updated before firing the 'ended' event.
       mMaster->UpdateNextFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE);
 
       mMaster->mOnPlaybackEvent.Notify(MediaEventType::PlaybackEnded);
 
       mSentPlaybackEndedEvent = true;
@@ -1814,17 +1844,17 @@ private:
  *
  * Transition from:
  *   Any states other than SHUTDOWN.
  */
 class MediaDecoderStateMachine::ShutdownState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit ShutdownState(Master* aPtr) : StateObject(aPtr) {}
+  explicit ShutdownState(Master* aPtr) : StateObject(aPtr) { }
 
   RefPtr<ShutdownPromise> Enter();
 
   void Exit() override
   {
     MOZ_DIAGNOSTIC_ASSERT(false, "Shouldn't escape the SHUTDOWN state.");
   }
 
@@ -1933,18 +1963,18 @@ StateObject::HandleResumeVideoDecoding()
   const SeekTarget::Type type = mMaster->HasAudio()
                                 ? SeekTarget::Type::Accurate
                                 : SeekTarget::Type::PrevSyncPoint;
 
   seekJob.mTarget.emplace(mMaster->GetMediaTime(),
                           type,
                           true /* aVideoOnly */);
 
-  // Hold mMaster->mAbstractMainThread here because this->mMaster will be invalid
-  // after the current state object is deleted in SetState();
+  // Hold mMaster->mAbstractMainThread here because this->mMaster will be
+  // invalid after the current state object is deleted in SetState();
   RefPtr<AbstractThread> mainThread = mMaster->mAbstractMainThread;
 
   SetSeekingState(Move(seekJob), EventVisibility::Suppressed)->Then(
     mainThread, __func__,
     [start, info, hw](){ ReportRecoveryTelemetry(start, info, hw); },
     [](){});
 }
 
@@ -1971,17 +2001,18 @@ DecodeMetadataState::OnMetadataRead(Meta
   mMetadataRequest.Complete();
 
   // Set mode to PLAYBACK after reading metadata.
   Resource()->SetReadMode(MediaCacheStream::MODE_PLAYBACK);
 
   mMaster->mInfo.emplace(aMetadata->mInfo);
   mMaster->mMetadataTags = aMetadata->mTags.forget();
   mMaster->mMediaSeekable = Info().mMediaSeekable;
-  mMaster->mMediaSeekableOnlyInBufferedRanges = Info().mMediaSeekableOnlyInBufferedRanges;
+  mMaster->mMediaSeekableOnlyInBufferedRanges =
+    Info().mMediaSeekableOnlyInBufferedRanges;
 
   if (Info().mMetadataDuration.isSome()) {
     mMaster->RecomputeDuration();
   } else if (Info().mUnadjustedMetadataEndTime.isSome()) {
     const TimeUnit unadjusted = Info().mUnadjustedMetadataEndTime.ref();
     const TimeUnit adjustment = Info().mStartTime;
     mMaster->mInfo->mMetadataDuration.emplace(unadjusted - adjustment);
     mMaster->RecomputeDuration();
@@ -2057,34 +2088,34 @@ DecodingFirstFrameState::Enter()
 }
 
 void
 MediaDecoderStateMachine::
 DecodingFirstFrameState::MaybeFinishDecodeFirstFrame()
 {
   MOZ_ASSERT(!mMaster->mSentFirstFrameLoadedEvent);
 
-  if ((mMaster->IsAudioDecoding() && AudioQueue().GetSize() == 0) ||
-      (mMaster->IsVideoDecoding() && VideoQueue().GetSize() == 0)) {
+  if ((mMaster->IsAudioDecoding() && AudioQueue().GetSize() == 0)
+      || (mMaster->IsVideoDecoding() && VideoQueue().GetSize() == 0)) {
     return;
   }
 
   mMaster->FinishDecodeFirstFrame();
   SetState<DecodingState>();
 }
 
 void
 MediaDecoderStateMachine::
 DecodingState::Enter()
 {
   MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent);
 
-  if (!mMaster->mIsVisible &&
-      !mMaster->mVideoDecodeSuspendTimer.IsScheduled() &&
-      !mMaster->mVideoDecodeSuspended) {
+  if (!mMaster->mIsVisible
+      && !mMaster->mVideoDecodeSuspendTimer.IsScheduled()
+      && !mMaster->mVideoDecodeSuspended) {
     // If we are not visible and the timer is not scheduled, it means the timer
     // has timed out and we should suspend video decoding now if necessary.
     HandleVideoSuspendTimeout();
   }
 
   if (!mMaster->IsVideoDecoding() && !mMaster->IsAudioDecoding()) {
     SetState<CompletedState>();
     return;
@@ -2143,52 +2174,53 @@ DecodingState::HandleEndOfVideo()
     MaybeStopPrerolling();
   }
 }
 
 void
 MediaDecoderStateMachine::
 DecodingState::DispatchDecodeTasksIfNeeded()
 {
-  if (mMaster->IsAudioDecoding() &&
-      !mMaster->mMinimizePreroll &&
-      !mMaster->HaveEnoughDecodedAudio()) {
+  if (mMaster->IsAudioDecoding()
+      && !mMaster->mMinimizePreroll
+      && !mMaster->HaveEnoughDecodedAudio()) {
     EnsureAudioDecodeTaskQueued();
   }
 
-  if (mMaster->IsVideoDecoding() &&
-      !mMaster->mMinimizePreroll &&
-      !mMaster->HaveEnoughDecodedVideo()) {
+  if (mMaster->IsVideoDecoding()
+      && !mMaster->mMinimizePreroll
+      && !mMaster->HaveEnoughDecodedVideo()) {
     EnsureVideoDecodeTaskQueued();
   }
 }
 
 void
 MediaDecoderStateMachine::
 DecodingState::EnsureAudioDecodeTaskQueued()
 {
-  if (!mMaster->IsAudioDecoding() ||
-      mMaster->IsRequestingAudioData() ||
-      mMaster->IsWaitingAudioData()) {
+  if (!mMaster->IsAudioDecoding()
+      || mMaster->IsRequestingAudioData()
+      || mMaster->IsWaitingAudioData()) {
     return;
   }
   mMaster->RequestAudioData();
 }
 
 void
 MediaDecoderStateMachine::
 DecodingState::EnsureVideoDecodeTaskQueued()
 {
-  if (!mMaster->IsVideoDecoding() ||
-      mMaster->IsRequestingVideoData() ||
-      mMaster->IsWaitingVideoData()) {
+  if (!mMaster->IsVideoDecoding()
+      || mMaster->IsRequestingVideoData()
+      || mMaster->IsWaitingVideoData()) {
     return;
   }
-  mMaster->RequestVideoData(NeedToSkipToNextKeyframe(),
-                            media::TimeUnit::FromMicroseconds(mMaster->GetMediaTime()));
+  mMaster->RequestVideoData(
+    NeedToSkipToNextKeyframe(),
+    media::TimeUnit::FromMicroseconds(mMaster->GetMediaTime()));
 }
 
 bool
 MediaDecoderStateMachine::
 DecodingState::NeedToSkipToNextKeyframe()
 {
   // Since GetClock() can only be called after starting MediaSink, we return
   // false quickly if it is not started because we won't fall behind playback
@@ -2206,27 +2238,32 @@ DecodingState::NeedToSkipToNextKeyframe(
   // We'll skip the video decode to the next keyframe if we're low on
   // audio, or if we're low on video, provided we're not running low on
   // data to decode. If we're running low on downloaded data to decode,
   // we won't start keyframe skipping, as we'll be pausing playback to buffer
   // soon anyway and we'll want to be able to display frames immediately
   // after buffering finishes. We ignore the low audio calculations for
   // readers that are async: since their audio decode runs on a different
   // task queue, it should never run low and skipping won't help their decode.
-  bool isLowOnDecodedAudio = !Reader()->IsAsync() &&
-                             mMaster->IsAudioDecoding() &&
-                             (mMaster->GetDecodedAudioDuration() <
-                              mMaster->mLowAudioThresholdUsecs * mMaster->mPlaybackRate);
-  bool isLowOnDecodedVideo = (mMaster->GetClock() - mMaster->mDecodedVideoEndTime) * mMaster->mPlaybackRate >
-                             LOW_VIDEO_THRESHOLD_USECS;
+  bool isLowOnDecodedAudio =
+    !Reader()->IsAsync()
+    && mMaster->IsAudioDecoding()
+    && (mMaster->GetDecodedAudioDuration()
+        < mMaster->mLowAudioThresholdUsecs * mMaster->mPlaybackRate);
+  bool isLowOnDecodedVideo =
+    (mMaster->GetClock() - mMaster->mDecodedVideoEndTime)
+    * mMaster->mPlaybackRate
+    > LOW_VIDEO_THRESHOLD_USECS;
   bool lowBuffered = mMaster->HasLowBufferedData();
 
   if ((isLowOnDecodedAudio || isLowOnDecodedVideo) && !lowBuffered) {
-    SLOG("Skipping video decode to the next keyframe lowAudio=%d lowVideo=%d lowUndecoded=%d async=%d",
-         isLowOnDecodedAudio, isLowOnDecodedVideo, lowBuffered, Reader()->IsAsync());
+    SLOG("Skipping video decode to the next keyframe lowAudio=%d lowVideo=%d "
+         "lowUndecoded=%d async=%d",
+         isLowOnDecodedAudio, isLowOnDecodedVideo, lowBuffered,
+         Reader()->IsAsync());
     return true;
   }
 
   return false;
 }
 
 void
 MediaDecoderStateMachine::
@@ -2243,23 +2280,23 @@ DecodingState::MaybeStartBuffering()
   // Don't enter buffering while prerolling so that the decoder has a chance to
   // enqueue some decoded data before we give up and start buffering.
   if (!mMaster->IsPlaying()) {
     return;
   }
 
   bool shouldBuffer;
   if (Reader()->UseBufferingHeuristics()) {
-    shouldBuffer = IsExpectingMoreData() &&
-                   mMaster->HasLowDecodedData() &&
-                   mMaster->HasLowBufferedData();
+    shouldBuffer = IsExpectingMoreData()
+                   && mMaster->HasLowDecodedData()
+                   && mMaster->HasLowBufferedData();
   } else {
     shouldBuffer =
-      (mMaster->OutOfDecodedAudio() && mMaster->IsWaitingAudioData()) ||
-      (mMaster->OutOfDecodedVideo() && mMaster->IsWaitingVideoData());
+      (mMaster->OutOfDecodedAudio() && mMaster->IsWaitingAudioData())
+      || (mMaster->OutOfDecodedVideo() && mMaster->IsWaitingVideoData());
   }
   if (shouldBuffer) {
     SetState<BufferingState>();
   }
 }
 
 void
 MediaDecoderStateMachine::
@@ -2300,41 +2337,42 @@ SeekingState::SeekCompleted()
   if (!target.IsVideoOnly()) {
     // Don't update playback position for video-only seek.
     // Otherwise we might have |newCurrentTime > mMediaSink->GetPosition()|
     // and fail the assertion in GetClock() since we didn't stop MediaSink.
     mMaster->UpdatePlaybackPositionInternal(newCurrentTime);
   }
 
   // Try to decode another frame to detect if we're at the end...
-  SLOG("Seek completed, mCurrentPosition=%lld", mMaster->mCurrentPosition.Ref());
+  SLOG("Seek completed, mCurrentPosition=%lld",
+       mMaster->mCurrentPosition.Ref());
 
   if (mMaster->VideoQueue().PeekFront()) {
     mMaster->mMediaSink->Redraw(Info().mVideo);
     mMaster->mOnPlaybackEvent.Notify(MediaEventType::Invalidate);
   }
 
   SetState<DecodingState>();
 }
 
 void
 MediaDecoderStateMachine::
 BufferingState::DispatchDecodeTasksIfNeeded()
 {
-  if (mMaster->IsAudioDecoding() &&
-      !mMaster->HaveEnoughDecodedAudio() &&
-      !mMaster->IsRequestingAudioData() &&
-      !mMaster->IsWaitingAudioData()) {
+  if (mMaster->IsAudioDecoding()
+      && !mMaster->HaveEnoughDecodedAudio()
+      && !mMaster->IsRequestingAudioData()
+      && !mMaster->IsWaitingAudioData()) {
     mMaster->RequestAudioData();
   }
 
-  if (mMaster->IsVideoDecoding() &&
-      !mMaster->HaveEnoughDecodedVideo() &&
-      !mMaster->IsRequestingVideoData() &&
-      !mMaster->IsWaitingVideoData()) {
+  if (mMaster->IsVideoDecoding()
+      && !mMaster->HaveEnoughDecodedVideo()
+      && !mMaster->IsRequestingVideoData()
+      && !mMaster->IsWaitingVideoData()) {
     mMaster->RequestVideoData(false, media::TimeUnit());
   }
 }
 
 void
 MediaDecoderStateMachine::
 BufferingState::Step()
 {
@@ -2342,34 +2380,35 @@ BufferingState::Step()
   MOZ_ASSERT(!mBufferingStart.IsNull(), "Must know buffering start time.");
 
   // With buffering heuristics we will remain in the buffering state if
   // we've not decoded enough data to begin playback, or if we've not
   // downloaded a reasonable amount of data inside our buffering time.
   if (Reader()->UseBufferingHeuristics()) {
     TimeDuration elapsed = now - mBufferingStart;
     bool isLiveStream = Resource()->IsLiveStream();
-    if ((isLiveStream || !mMaster->CanPlayThrough()) &&
-        elapsed < TimeDuration::FromSeconds(mBufferingWait * mMaster->mPlaybackRate) &&
-        mMaster->HasLowBufferedData(mBufferingWait * USECS_PER_S) &&
-        IsExpectingMoreData()) {
+    if ((isLiveStream || !mMaster->CanPlayThrough())
+        && elapsed
+           < TimeDuration::FromSeconds(mBufferingWait * mMaster->mPlaybackRate)
+        && mMaster->HasLowBufferedData(mBufferingWait * USECS_PER_S)
+        && IsExpectingMoreData()) {
       SLOG("Buffering: wait %ds, timeout in %.3lfs",
            mBufferingWait, mBufferingWait - elapsed.ToSeconds());
       mMaster->ScheduleStateMachineIn(USECS_PER_S);
       DispatchDecodeTasksIfNeeded();
       return;
     }
   } else if (mMaster->OutOfDecodedAudio() || mMaster->OutOfDecodedVideo()) {
     DispatchDecodeTasksIfNeeded();
-    MOZ_ASSERT(!mMaster->OutOfDecodedAudio() ||
-               mMaster->IsRequestingAudioData() ||
-               mMaster->IsWaitingAudioData());
-    MOZ_ASSERT(!mMaster->OutOfDecodedVideo() ||
-               mMaster->IsRequestingVideoData() ||
-               mMaster->IsWaitingVideoData());
+    MOZ_ASSERT(!mMaster->OutOfDecodedAudio()
+               || mMaster->IsRequestingAudioData()
+               || mMaster->IsWaitingAudioData());
+    MOZ_ASSERT(!mMaster->OutOfDecodedVideo()
+               || mMaster->IsRequestingVideoData()
+               || mMaster->IsWaitingVideoData());
     SLOG("In buffering mode, waiting to be notified: outOfAudio: %d, "
          "mAudioStatus: %s, outOfVideo: %d, mVideoStatus: %s",
          mMaster->OutOfDecodedAudio(), mMaster->AudioRequestStatus(),
          mMaster->OutOfDecodedVideo(), mMaster->VideoRequestStatus());
     return;
   }
 
   SLOG("Buffered for %.3lfs", (now - mBufferingStart).ToSeconds());
@@ -2525,19 +2564,19 @@ MediaDecoderStateMachine::MediaDecoderSt
 {
   MOZ_COUNT_CTOR(MediaDecoderStateMachine);
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
 
   InitVideoQueuePrefs();
 
 #ifdef XP_WIN
   // Ensure high precision timers are enabled on Windows, otherwise the state
-  // machine isn't woken up at reliable intervals to set the next frame,
-  // and we drop frames while painting. Note that multiple calls to this
-  // function per-process is OK, provided each call is matched by a corresponding
+  // machine isn't woken up at reliable intervals to set the next frame, and we
+  // drop frames while painting. Note that multiple calls to this function
+  // per-process are OK, provided each call is matched by a corresponding
   // timeEndPeriod() call.
   timeBeginPeriod(1);
 #endif
 }
 
 #undef INIT_WATCHABLE
 #undef INIT_MIRROR
 #undef INIT_CANONICAL
@@ -2567,27 +2606,33 @@ MediaDecoderStateMachine::Initialization
   mPreservesPitch.Connect(aDecoder->CanonicalPreservesPitch());
   mSameOriginMedia.Connect(aDecoder->CanonicalSameOriginMedia());
   mMediaPrincipalHandle.Connect(aDecoder->CanonicalMediaPrincipalHandle());
   mPlaybackBytesPerSecond.Connect(aDecoder->CanonicalPlaybackBytesPerSecond());
   mPlaybackRateReliable.Connect(aDecoder->CanonicalPlaybackRateReliable());
   mDecoderPosition.Connect(aDecoder->CanonicalDecoderPosition());
 
   // Initialize watchers.
-  mWatchManager.Watch(mBuffered, &MediaDecoderStateMachine::BufferedRangeUpdated);
+  mWatchManager.Watch(mBuffered,
+                      &MediaDecoderStateMachine::BufferedRangeUpdated);
   mWatchManager.Watch(mVolume, &MediaDecoderStateMachine::VolumeChanged);
-  mWatchManager.Watch(mPreservesPitch, &MediaDecoderStateMachine::PreservesPitchChanged);
-  mWatchManager.Watch(mEstimatedDuration, &MediaDecoderStateMachine::RecomputeDuration);
-  mWatchManager.Watch(mExplicitDuration, &MediaDecoderStateMachine::RecomputeDuration);
-  mWatchManager.Watch(mObservedDuration, &MediaDecoderStateMachine::RecomputeDuration);
+  mWatchManager.Watch(mPreservesPitch,
+                      &MediaDecoderStateMachine::PreservesPitchChanged);
+  mWatchManager.Watch(mEstimatedDuration,
+                      &MediaDecoderStateMachine::RecomputeDuration);
+  mWatchManager.Watch(mExplicitDuration,
+                      &MediaDecoderStateMachine::RecomputeDuration);
+  mWatchManager.Watch(mObservedDuration,
+                      &MediaDecoderStateMachine::RecomputeDuration);
   mWatchManager.Watch(mPlayState, &MediaDecoderStateMachine::PlayStateChanged);
 
   if (MediaPrefs::MDSMSuspendBackgroundVideoEnabled()) {
     mIsVisible.Connect(aDecoder->CanonicalIsVisible());
-    mWatchManager.Watch(mIsVisible, &MediaDecoderStateMachine::VisibilityChanged);
+    mWatchManager.Watch(mIsVisible,
+                        &MediaDecoderStateMachine::VisibilityChanged);
   }
 }
 
 void
 MediaDecoderStateMachine::AudioAudibleChanged(bool aAudible)
 {
   mIsAudioDataAudible = aAudible;
 }
@@ -2598,29 +2643,31 @@ MediaDecoderStateMachine::CreateAudioSin
   RefPtr<MediaDecoderStateMachine> self = this;
   auto audioSinkCreator = [self] () {
     MOZ_ASSERT(self->OnTaskQueue());
     DecodedAudioDataSink* audioSink = new DecodedAudioDataSink(
       self->mTaskQueue, self->mAudioQueue, self->GetMediaTime(),
       self->Info().mAudio, self->mAudioChannel);
 
     self->mAudibleListener = audioSink->AudibleEvent().Connect(
-      self->mTaskQueue, self.get(), &MediaDecoderStateMachine::AudioAudibleChanged);
+      self->mTaskQueue, self.get(),
+      &MediaDecoderStateMachine::AudioAudibleChanged);
     return audioSink;
   };
   return new AudioSinkWrapper(mTaskQueue, audioSinkCreator);
 }
 
 already_AddRefed<media::MediaSink>
 MediaDecoderStateMachine::CreateMediaSink(bool aAudioCaptured)
 {
-  RefPtr<media::MediaSink> audioSink = aAudioCaptured
-    ? new DecodedStream(mTaskQueue, mAbstractMainThread, mAudioQueue, mVideoQueue,
-                        mOutputStreamManager, mSameOriginMedia.Ref(),
-                        mMediaPrincipalHandle.Ref())
+  RefPtr<media::MediaSink> audioSink =
+    aAudioCaptured
+    ? new DecodedStream(mTaskQueue, mAbstractMainThread, mAudioQueue,
+                        mVideoQueue, mOutputStreamManager,
+                        mSameOriginMedia.Ref(), mMediaPrincipalHandle.Ref())
     : CreateAudioSink();
 
   RefPtr<media::MediaSink> mediaSink =
     new VideoSink(mTaskQueue, audioSink, mVideoQueue,
                   mVideoFrameContainer, *mFrameStats,
                   sVideoQueueSendToCompositorSize);
   return mediaSink.forget();
 }
@@ -2639,18 +2686,18 @@ MediaDecoderStateMachine::GetDecodedAudi
   return AudioQueue().Duration();
 }
 
 bool
 MediaDecoderStateMachine::HaveEnoughDecodedAudio()
 {
   MOZ_ASSERT(OnTaskQueue());
   auto ampleAudioUSecs = mAmpleAudioThresholdUsecs * mPlaybackRate;
-  return AudioQueue().GetSize() > 0 &&
-         GetDecodedAudioDuration() >= ampleAudioUSecs;
+  return AudioQueue().GetSize() > 0
+         && GetDecodedAudioDuration() >= ampleAudioUSecs;
 }
 
 bool MediaDecoderStateMachine::HaveEnoughDecodedVideo()
 {
   MOZ_ASSERT(OnTaskQueue());
   return VideoQueue().GetSize() >= GetAmpleVideoFrames() * mPlaybackRate + 1;
 }
 
@@ -2799,17 +2846,18 @@ void MediaDecoderStateMachine::UpdatePla
                                TimeUnit::FromMicroseconds(mCurrentPosition.Ref()));
 }
 
 void MediaDecoderStateMachine::UpdatePlaybackPosition(int64_t aTime)
 {
   MOZ_ASSERT(OnTaskQueue());
   UpdatePlaybackPositionInternal(aTime);
 
-  bool fragmentEnded = mFragmentEndTime >= 0 && GetMediaTime() >= mFragmentEndTime;
+  bool fragmentEnded =
+    mFragmentEndTime >= 0 && GetMediaTime() >= mFragmentEndTime;
   mMetadataManager.DispatchMetadataIfNeeded(TimeUnit::FromMicroseconds(aTime));
 
   if (fragmentEnded) {
     StopPlayback();
   }
 }
 
 /* static */ const char*
@@ -2868,18 +2916,18 @@ void MediaDecoderStateMachine::Recompute
   } else {
     return;
   }
 
   // Only adjust the duration when an explicit duration isn't set (MSE).
   // The duration is always exactly known with MSE and there's no need to adjust
  // it based on what may have been seen in the past; in particular, this data
  // may no longer exist, such as when the mediasource duration was reduced.
-  if (mExplicitDuration.Ref().isNothing() &&
-      duration < mObservedDuration.Ref()) {
+  if (mExplicitDuration.Ref().isNothing()
+      && duration < mObservedDuration.Ref()) {
     duration = mObservedDuration;
   }
 
   MOZ_ASSERT(duration.ToMicroseconds() >= 0);
   mDuration = Some(duration);
 }
 
 RefPtr<ShutdownPromise>
@@ -2892,18 +2940,18 @@ MediaDecoderStateMachine::Shutdown()
 void MediaDecoderStateMachine::PlayStateChanged()
 {
   MOZ_ASSERT(OnTaskQueue());
 
   if (mPlayState != MediaDecoder::PLAY_STATE_PLAYING) {
     mVideoDecodeSuspendTimer.Reset();
   } else if (mMinimizePreroll) {
     // Once we start playing, we don't want to minimize our prerolling, as we
-    // assume the user is likely to want to keep playing in future. This needs to
-    // happen before we invoke StartDecoding().
+    // assume the user is likely to want to keep playing in the future. This
+    // needs to happen before we invoke StartDecoding().
     mMinimizePreroll = false;
   }
 
   mStateObj->HandlePlayStateChanged(mPlayState);
 }
 
 void MediaDecoderStateMachine::VisibilityChanged()
 {
@@ -2951,28 +2999,33 @@ void MediaDecoderStateMachine::BufferedR
 }
 
 RefPtr<MediaDecoder::SeekPromise>
 MediaDecoderStateMachine::Seek(const SeekTarget& aTarget)
 {
   MOZ_ASSERT(OnTaskQueue());
 
   if (IsShutdown()) {
-    return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true, __func__);
+    return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true,
+                                                      __func__);
   }
 
   // We need to be able to seek in some way
   if (!mMediaSeekable && !mMediaSeekableOnlyInBufferedRanges) {
-    DECODER_WARN("Seek() function should not be called on a non-seekable state machine");
-    return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true, __func__);
+    DECODER_WARN(
+      "Seek() function should not be called on a non-seekable state machine");
+    return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true,
+                                                      __func__);
   }
 
   if (aTarget.IsNextFrame() && !HasVideo()) {
-    DECODER_WARN("Ignore a NextFrameSeekTask on a media file without video track.");
-    return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true, __func__);
+    DECODER_WARN(
+      "Ignore a NextFrameSeekTask on a media file without video track.");
+    return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true,
+                                                      __func__);
   }
 
   MOZ_ASSERT(mDuration.Ref().isSome(), "We should have got duration already");
 
   return mStateObj->HandleSeek(aTarget);
 }
 
 RefPtr<MediaDecoder::SeekPromise>
@@ -3007,18 +3060,20 @@ MediaDecoderStateMachine::RequestAudioDa
              AudioQueue().GetSize(), mReader->SizeOfAudioQueueInFrames());
 
   mReader->RequestAudioData()->Then(
     OwnerThread(), __func__,
     [this] (MediaData* aAudio) {
       MOZ_ASSERT(aAudio);
       mAudioDataRequest.Complete();
       // audio->GetEndTime() is not always mono-increasing in chained ogg.
-      mDecodedAudioEndTime = std::max(aAudio->GetEndTime(), mDecodedAudioEndTime);
-      SAMPLE_LOG("OnAudioDecoded [%lld,%lld]", aAudio->mTime, aAudio->GetEndTime());
+      mDecodedAudioEndTime =
+        std::max(aAudio->GetEndTime(), mDecodedAudioEndTime);
+      SAMPLE_LOG("OnAudioDecoded [%lld,%lld]", aAudio->mTime,
+                 aAudio->GetEndTime());
       mStateObj->HandleAudioDecoded(aAudio);
     },
     [this] (const MediaResult& aError) {
       SAMPLE_LOG("OnAudioNotDecoded aError=%u", aError.Code());
       mAudioDataRequest.Complete();
       switch (aError.Code()) {
         case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
           mStateObj->HandleWaitingForAudio();
@@ -3038,29 +3093,32 @@ MediaDecoderStateMachine::RequestAudioDa
 void
 MediaDecoderStateMachine::RequestVideoData(bool aSkipToNextKeyframe,
                                            const media::TimeUnit& aCurrentTime)
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(IsVideoDecoding());
   MOZ_ASSERT(!IsRequestingVideoData());
   MOZ_ASSERT(!IsWaitingVideoData());
-  SAMPLE_LOG("Queueing video task - queued=%i, decoder-queued=%o, skip=%i, time=%lld",
-             VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames(), aSkipToNextKeyframe,
-             aCurrentTime.ToMicroseconds());
+  SAMPLE_LOG(
+    "Queueing video task - queued=%i, decoder-queued=%u, skip=%i, time=%lld",
+    VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames(),
+    aSkipToNextKeyframe, aCurrentTime.ToMicroseconds());
 
   TimeStamp videoDecodeStartTime = TimeStamp::Now();
   mReader->RequestVideoData(aSkipToNextKeyframe, aCurrentTime)->Then(
     OwnerThread(), __func__,
     [this, videoDecodeStartTime] (MediaData* aVideo) {
       MOZ_ASSERT(aVideo);
       mVideoDataRequest.Complete();
       // Handle abnormal or negative timestamps.
-      mDecodedVideoEndTime = std::max(mDecodedVideoEndTime, aVideo->GetEndTime());
-      SAMPLE_LOG("OnVideoDecoded [%lld,%lld]", aVideo->mTime, aVideo->GetEndTime());
+      mDecodedVideoEndTime =
+        std::max(mDecodedVideoEndTime, aVideo->GetEndTime());
+      SAMPLE_LOG("OnVideoDecoded [%lld,%lld]", aVideo->mTime,
+                 aVideo->GetEndTime());
       mStateObj->HandleVideoDecoded(aVideo, videoDecodeStartTime);
     },
     [this] (const MediaResult& aError) {
       SAMPLE_LOG("OnVideoNotDecoded aError=%u", aError.Code());
       mVideoDataRequest.Complete();
       switch (aError.Code()) {
         case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
           mStateObj->HandleWaitingForVideo();
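
Both resolve handlers clamp with std::max() because, as the comments note, end times are not monotonic in chained ogg; the decoded-end watermark must only ever advance. A standalone sketch of the clamp:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // The watermark only moves forward, even when a chained stream hands
    // back a sample whose end time jumps backwards.
    int64_t AdvanceWatermark(int64_t aWatermark, int64_t aSampleEndUs)
    {
      return std::max(aWatermark, aSampleEndUs);
    }

    int main()
    {
      int64_t watermark = 0;
      // End times from a chained stream: the third sample jumps backwards.
      const int64_t endTimes[] = { 40000, 80000, 20000, 120000 };
      for (int64_t t : endTimes) {
        watermark = AdvanceWatermark(watermark, t);
        printf("end=%lld watermark=%lld\n", (long long)t, (long long)watermark);
      }
      return 0;
    }
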
@@ -3136,42 +3194,43 @@ MediaDecoderStateMachine::StartMediaSink
     }
   }
 }
 
 bool
 MediaDecoderStateMachine::HasLowDecodedAudio()
 {
   MOZ_ASSERT(OnTaskQueue());
-  return IsAudioDecoding() &&
-         GetDecodedAudioDuration() < EXHAUSTED_DATA_MARGIN_USECS * mPlaybackRate;
+  return IsAudioDecoding()
+         && GetDecodedAudioDuration()
+            < EXHAUSTED_DATA_MARGIN_USECS * mPlaybackRate;
 }
 
 bool
 MediaDecoderStateMachine::HasLowDecodedVideo()
 {
   MOZ_ASSERT(OnTaskQueue());
-  return IsVideoDecoding() &&
-         VideoQueue().GetSize() < LOW_VIDEO_FRAMES * mPlaybackRate;
+  return IsVideoDecoding()
+         && VideoQueue().GetSize() < LOW_VIDEO_FRAMES * mPlaybackRate;
 }
 
 bool
 MediaDecoderStateMachine::HasLowDecodedData()
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(mReader->UseBufferingHeuristics());
   return HasLowDecodedAudio() || HasLowDecodedVideo();
 }
 
 bool MediaDecoderStateMachine::OutOfDecodedAudio()
 {
     MOZ_ASSERT(OnTaskQueue());
-    return IsAudioDecoding() && !AudioQueue().IsFinished() &&
-           AudioQueue().GetSize() == 0 &&
-           !mMediaSink->HasUnplayedFrames(TrackInfo::kAudioTrack);
+    return IsAudioDecoding() && !AudioQueue().IsFinished()
+           && AudioQueue().GetSize() == 0
+           && !mMediaSink->HasUnplayedFrames(TrackInfo::kAudioTrack);
 }
 
 bool MediaDecoderStateMachine::HasLowBufferedData()
 {
   MOZ_ASSERT(OnTaskQueue());
   return HasLowBufferedData(detail::LOW_DATA_THRESHOLD_USECS);
 }
 
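Both low-data predicates scale their threshold by mPlaybackRate: at 2x, a given wall-clock margin consumes twice as much media time, so twice as much decoded data still counts as "low". A sketch with hypothetical constants (the real values are defined elsewhere in this file):

    #include <cstddef>
    #include <cstdint>

    static const int64_t EXHAUSTED_DATA_MARGIN_USECS = 100000; // assumed value
    static const uint32_t LOW_VIDEO_FRAMES = 2;                // assumed value

    bool HasLowDecodedAudio(int64_t aDecodedAudioUs, double aPlaybackRate)
    {
      return aDecodedAudioUs < EXHAUSTED_DATA_MARGIN_USECS * aPlaybackRate;
    }

    bool HasLowDecodedVideo(size_t aQueuedFrames, double aPlaybackRate)
    {
      return aQueuedFrames < LOW_VIDEO_FRAMES * aPlaybackRate;
    }
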
@@ -3189,24 +3248,25 @@ bool MediaDecoderStateMachine::HasLowBuf
   if (mBuffered.Ref().IsInvalid()) {
     return false;
   }
 
   // We are never low in decoded data when we don't have audio/video or have
   // decoded all audio/video samples.
   int64_t endOfDecodedVideoData =
     (HasVideo() && !VideoQueue().IsFinished())
-      ? mDecodedVideoEndTime
-      : INT64_MAX;
+    ? mDecodedVideoEndTime
+    : INT64_MAX;
   int64_t endOfDecodedAudioData =
     (HasAudio() && !AudioQueue().IsFinished())
-      ? mDecodedAudioEndTime
-      : INT64_MAX;
-
-  int64_t endOfDecodedData = std::min(endOfDecodedVideoData, endOfDecodedAudioData);
+    ? mDecodedAudioEndTime
+    : INT64_MAX;
+
+  int64_t endOfDecodedData =
+    std::min(endOfDecodedVideoData, endOfDecodedAudioData);
   if (Duration().ToMicroseconds() < endOfDecodedData) {
     // Our duration is not up to date. No point buffering.
     return false;
   }
 
   if (endOfDecodedData == INT64_MAX) {
     // Have decoded all samples. No point buffering.
     return false;
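
The INT64_MAX assignments above are sentinels: a finished or absent track drops out of the std::min(), and a sentinel surviving the min means every sample is already decoded. A standalone sketch:

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    int64_t EndOfDecodedData(bool aHasVideo, bool aVideoDone, int64_t aVideoEnd,
                             bool aHasAudio, bool aAudioDone, int64_t aAudioEnd)
    {
      const int64_t sentinel = std::numeric_limits<int64_t>::max();
      int64_t video = (aHasVideo && !aVideoDone) ? aVideoEnd : sentinel;
      int64_t audio = (aHasAudio && !aAudioDone) ? aAudioEnd : sentinel;
      // The sentinel surviving the min means no track has undecoded data left.
      return std::min(video, audio);
    }
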
@@ -3266,17 +3326,18 @@ MediaDecoderStateMachine::FinishDecodeFi
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(!mSentFirstFrameLoadedEvent);
   DECODER_LOG("FinishDecodeFirstFrame");
 
   mMediaSink->Redraw(Info().mVideo);
 
   DECODER_LOG("Media duration %lld, "
               "transportSeekable=%d, mediaSeekable=%d",
-              Duration().ToMicroseconds(), mResource->IsTransportSeekable(), mMediaSeekable);
+              Duration().ToMicroseconds(), mResource->IsTransportSeekable(),
+              mMediaSeekable);
 
   // Get potentially updated metadata
   mReader->ReadUpdatedMetadata(mInfo.ptr());
 
   EnqueueFirstFrameLoadedEvent();
 }
 
 RefPtr<ShutdownPromise>
@@ -3359,17 +3420,18 @@ MediaDecoderStateMachine::UpdatePlayback
   if (VideoEndTime() != -1 || AudioEndTime() != -1) {
 
     const int64_t clockTime = GetClock();
     // Skip frames up to the frame at the playback position, and figure out
     // the time remaining until it's time to display the next frame and drop
     // the current frame.
     NS_ASSERTION(clockTime >= 0, "Should have positive clock time.");
 
-    // These will be non -1 if we've displayed a video frame, or played an audio frame.
+    // These will not be -1 if we've displayed a video frame or played an
+    // audio frame.
     int64_t t = std::min(clockTime, std::max(VideoEndTime(), AudioEndTime()));
     // FIXME: Bug 1091422 - chained ogg files hit this assertion.
     //MOZ_ASSERT(t >= GetMediaTime());
     if (t > GetMediaTime()) {
       UpdatePlaybackPosition(t);
     }
   }
   // Note we have to update playback position before releasing the monitor.
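
The min/max pair above clamps the reported position: never past the media clock, and never past the newest frame actually handed to a sink (-1 meaning none yet). As a one-function sketch:

    #include <algorithm>
    #include <cstdint>

    int64_t ClampedPosition(int64_t aClockUs, int64_t aVideoEndUs,
                            int64_t aAudioEndUs)
    {
      // max() picks the most recent rendered frame across tracks; min()
      // keeps the position from running ahead of the decoded data.
      return std::min(aClockUs, std::max(aVideoEndUs, aAudioEndUs));
    }
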
@@ -3380,21 +3442,26 @@ MediaDecoderStateMachine::UpdatePlayback
   int64_t delay = std::max<int64_t>(1, AUDIO_DURATION_USECS / mPlaybackRate);
   ScheduleStateMachineIn(delay);
 }
 
 /* static */ const char*
 MediaDecoderStateMachine::ToStr(NextFrameStatus aStatus)
 {
   switch (aStatus) {
-    case MediaDecoderOwner::NEXT_FRAME_AVAILABLE: return "NEXT_FRAME_AVAILABLE";
-    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE: return "NEXT_FRAME_UNAVAILABLE";
-    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING: return "NEXT_FRAME_UNAVAILABLE_BUFFERING";
-    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING: return "NEXT_FRAME_UNAVAILABLE_SEEKING";
-    case MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED: return "NEXT_FRAME_UNINITIALIZED";
+    case MediaDecoderOwner::NEXT_FRAME_AVAILABLE:
+      return "NEXT_FRAME_AVAILABLE";
+    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE:
+      return "NEXT_FRAME_UNAVAILABLE";
+    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING:
+      return "NEXT_FRAME_UNAVAILABLE_BUFFERING";
+    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING:
+      return "NEXT_FRAME_UNAVAILABLE_SEEKING";
+    case MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED:
+      return "NEXT_FRAME_UNINITIALIZED";
   }
   return "UNKNOWN";
 }
 
 void
 MediaDecoderStateMachine::UpdateNextFrameStatus(NextFrameStatus aStatus)
 {
   MOZ_ASSERT(OnTaskQueue());
@@ -3411,17 +3478,18 @@ MediaDecoderStateMachine::CanPlayThrough
   return GetStatistics().CanPlayThrough();
 }
 
 MediaStatistics
 MediaDecoderStateMachine::GetStatistics()
 {
   MOZ_ASSERT(OnTaskQueue());
   MediaStatistics result;
-  result.mDownloadRate = mResource->GetDownloadRate(&result.mDownloadRateReliable);
+  result.mDownloadRate =
+    mResource->GetDownloadRate(&result.mDownloadRateReliable);
   result.mDownloadPosition = mResource->GetCachedDataEnd(mDecoderPosition);
   result.mTotalBytes = mResource->GetLength();
   result.mPlaybackRate = mPlaybackBytesPerSecond;
   result.mPlaybackRateReliable = mPlaybackRateReliable;
   result.mDecoderPosition = mDecoderPosition;
   result.mPlaybackPosition = mPlaybackOffset;
   return result;
 }
@@ -3430,25 +3498,26 @@ void
 MediaDecoderStateMachine::ScheduleStateMachine()
 {
   MOZ_ASSERT(OnTaskQueue());
   if (mDispatchedStateMachine) {
     return;
   }
   mDispatchedStateMachine = true;
 
-  OwnerThread()->Dispatch(NewRunnableMethod(this, &MediaDecoderStateMachine::RunStateMachine));
+  OwnerThread()->Dispatch(
+    NewRunnableMethod(this, &MediaDecoderStateMachine::RunStateMachine));
 }
 
 void
 MediaDecoderStateMachine::ScheduleStateMachineIn(int64_t aMicroseconds)
 {
-  MOZ_ASSERT(OnTaskQueue());          // mDelayedScheduler.Ensure() may Disconnect()
-                                      // the promise, which must happen on the state
-                                      // machine task queue.
+  MOZ_ASSERT(OnTaskQueue()); // mDelayedScheduler.Ensure() may Disconnect()
+                             // the promise, which must happen on the state
+                             // machine task queue.
   MOZ_ASSERT(aMicroseconds > 0);
   if (mDispatchedStateMachine) {
     return;
   }
 
   TimeStamp now = TimeStamp::Now();
   TimeStamp target = now + TimeDuration::FromMicroseconds(aMicroseconds);
 
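mDispatchedStateMachine is a coalescing flag: any number of schedule requests collapse into one pending dispatch, and the flag re-arms when the state machine actually runs. A minimal sketch of the pattern (not the real dispatch machinery):

    #include <functional>

    struct SchedulerSketch
    {
      bool mDispatched = false; // only touched on one task queue in reality

      void Schedule(const std::function<void()>& aDispatch)
      {
        if (mDispatched) {
          return; // a run is already pending; coalesce this request
        }
        mDispatched = true;
        aDispatch();
      }

      void RunStateMachine()
      {
        mDispatched = false; // re-arm before doing the actual work
      }
    };
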
@@ -3630,46 +3699,47 @@ MediaDecoderStateMachine::SetAudioCaptur
 
   // Restore playback parameters.
   mMediaSink->SetPlaybackParams(params);
 
   mAudioCaptured = aCaptured;
 
   // Don't buffer as much when audio is captured because we don't need to worry
   // about high latency audio devices.
-  mAmpleAudioThresholdUsecs = mAudioCaptured ?
-                              detail::AMPLE_AUDIO_USECS / 2 :
-                              detail::AMPLE_AUDIO_USECS;
+  mAmpleAudioThresholdUsecs =
+    mAudioCaptured ? detail::AMPLE_AUDIO_USECS / 2 : detail::AMPLE_AUDIO_USECS;
 
   mStateObj->HandleAudioCaptured();
 }
 
 uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const
 {
   MOZ_ASSERT(OnTaskQueue());
   return (mReader->IsAsync() && mReader->VideoIsHardwareAccelerated())
-    ? std::max<uint32_t>(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE)
-    : std::max<uint32_t>(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE);
+         ? std::max<uint32_t>(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE)
+         : std::max<uint32_t>(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE);
 }
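
Two sizing decisions sit side by side here: capturing audio halves the ample-audio threshold because no high-latency output device is involved, and async hardware-accelerated decoders get their own video queue size. A sketch with assumed constants (the real ones are prefs and detail:: values):

    #include <algorithm>
    #include <cstdint>

    static const int64_t AMPLE_AUDIO_USECS = 400000;   // assumed value
    static const uint32_t MIN_VIDEO_QUEUE_SIZE = 3;    // assumed value
    static const uint32_t sVideoQueueDefaultSize = 10; // assumed value
    static const uint32_t sVideoQueueHWAccelSize = 2;  // assumed value

    int64_t AmpleAudioThreshold(bool aAudioCaptured)
    {
      return aAudioCaptured ? AMPLE_AUDIO_USECS / 2 : AMPLE_AUDIO_USECS;
    }

    uint32_t AmpleVideoFrames(bool aIsAsync, bool aHwAccelerated)
    {
      return (aIsAsync && aHwAccelerated)
             ? std::max(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE)
             : std::max(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE);
    }
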
 
 nsCString
 MediaDecoderStateMachine::GetDebugInfo()
 {
   MOZ_ASSERT(OnTaskQueue());
   return nsPrintfCString(
-    "GetMediaTime=%lld GetClock=%lld mMediaSink=%p "
-    "state=%s mPlayState=%d mSentFirstFrameLoadedEvent=%d IsPlaying=%d "
-    "mAudioStatus=%s mVideoStatus=%s mDecodedAudioEndTime=%lld mDecodedVideoEndTime=%lld "
-    "mAudioCompleted=%d mVideoCompleted=%d ",
-    GetMediaTime(), mMediaSink->IsStarted() ? GetClock() : -1, mMediaSink.get(),
-    ToStateStr(), mPlayState.Ref(), mSentFirstFrameLoadedEvent, IsPlaying(),
-    AudioRequestStatus(), VideoRequestStatus(), mDecodedAudioEndTime, mDecodedVideoEndTime,
-    mAudioCompleted, mVideoCompleted)
-    + mStateObj->GetDebugInfo() + nsCString("\n")
-    + mMediaSink->GetDebugInfo();
+           "GetMediaTime=%lld GetClock=%lld mMediaSink=%p "
+           "state=%s mPlayState=%d mSentFirstFrameLoadedEvent=%d IsPlaying=%d "
+           "mAudioStatus=%s mVideoStatus=%s mDecodedAudioEndTime=%lld "
+           "mDecodedVideoEndTime=%lld "
+           "mAudioCompleted=%d mVideoCompleted=%d ",
+           GetMediaTime(), mMediaSink->IsStarted() ? GetClock() : -1,
+           mMediaSink.get(), ToStateStr(), mPlayState.Ref(),
+           mSentFirstFrameLoadedEvent, IsPlaying(), AudioRequestStatus(),
+           VideoRequestStatus(), mDecodedAudioEndTime, mDecodedVideoEndTime,
+           mAudioCompleted, mVideoCompleted)
+         + mStateObj->GetDebugInfo() + nsCString("\n")
+         + mMediaSink->GetDebugInfo();
 }
 
 RefPtr<MediaDecoder::DebugInfoPromise>
 MediaDecoderStateMachine::RequestDebugInfo()
 {
   using PromiseType = MediaDecoder::DebugInfoPromise;
   RefPtr<PromiseType::Private> p = new PromiseType::Private(__func__);
   OwnerThread()->Dispatch(NS_NewRunnableFunction([this, p] () {
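
RequestDebugInfo() hands back a promise at once and resolves it from the owner thread. The same shape with std::promise standing in for the Mozilla promise types (a sketch, not the actual API):

    #include <future>
    #include <string>
    #include <thread>

    std::future<std::string> RequestDebugInfoSketch()
    {
      auto promise = std::make_shared<std::promise<std::string>>();
      std::future<std::string> future = promise->get_future();
      std::thread([promise] {
        // "Owner thread": gather the debug state, then resolve.
        promise->set_value("state=DECODING IsPlaying=1");
      }).detach();
      return future;
    }
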
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -109,17 +109,18 @@ class AudioSegment;
 class DecodedStream;
 class MediaDecoderReaderWrapper;
 class OutputStreamManager;
 class TaskQueue;
 
 extern LazyLogModule gMediaDecoderLog;
 extern LazyLogModule gMediaSampleLog;
 
-enum class MediaEventType : int8_t {
+enum class MediaEventType : int8_t
+{
   PlaybackStarted,
   PlaybackStopped,
   PlaybackEnded,
   SeekStarted,
   Invalidate,
   EnterVideoSuspend,
   ExitVideoSuspend
 };
@@ -145,17 +146,18 @@ public:
   typedef MediaDecoderOwner::NextFrameStatus NextFrameStatus;
   typedef mozilla::layers::ImageContainer::FrameID FrameID;
   MediaDecoderStateMachine(MediaDecoder* aDecoder,
                            MediaDecoderReader* aReader);
 
   nsresult Init(MediaDecoder* aDecoder);
 
   // Enumeration for the valid decoding states
-  enum State {
+  enum State
+  {
     DECODER_STATE_DECODING_METADATA,
     DECODER_STATE_WAIT_FOR_CDM,
     DECODER_STATE_DORMANT,
     DECODER_STATE_DECODING_FIRSTFRAME,
     DECODER_STATE_DECODING,
     DECODER_STATE_SEEKING,
     DECODER_STATE_BUFFERING,
     DECODER_STATE_COMPLETED,
@@ -425,17 +427,18 @@ protected:
   bool IsWaitingAudioData() const { return mAudioWaitRequest.Exists(); }
   bool IsWaitingVideoData() const { return mVideoWaitRequest.Exists(); }
 
   // Returns the "media time". This is the absolute time which the media
   // playback has reached. i.e. this returns values in the range
   // [mStartTime, mEndTime], and mStartTime will not be 0 if the media does
   // not start at 0. Note this is different than the "current playback position",
   // which is in the range [0,duration].
-  int64_t GetMediaTime() const {
+  int64_t GetMediaTime() const
+  {
     MOZ_ASSERT(OnTaskQueue());
     return mCurrentPosition;
   }
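
As the comment explains, media time lives in [mStartTime, mEndTime] while the script-visible position lives in [0, duration]; the conversion is just an offset. A one-line sketch (microsecond times assumed):

    #include <cstdint>

    int64_t PlaybackPosition(int64_t aMediaTimeUs, int64_t aStartTimeUs)
    {
      return aMediaTimeUs - aStartTimeUs; // maps [start, end] onto [0, duration]
    }
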
 
   // Returns an upper bound on the number of microseconds of audio that is
   // decoded and playable. This is the sum of the number of usecs of audio which
   // is decoded and in the reader's audio queue, and the usecs of unplayed audio
   // which has been pushed to the audio hardware for playback. Note that after
@@ -489,17 +492,21 @@ private:
   // the audio, decoder, state machine, and main threads.
   MediaQueue<MediaData> mAudioQueue;
   // Queue of video frames. This queue is threadsafe, and is accessed from
   // the decoder, state machine, and main threads.
   MediaQueue<MediaData> mVideoQueue;
 
   UniquePtr<StateObject> mStateObj;
 
-  media::TimeUnit Duration() const { MOZ_ASSERT(OnTaskQueue()); return mDuration.Ref().ref(); }
+  media::TimeUnit Duration() const
+  {
+    MOZ_ASSERT(OnTaskQueue());
+    return mDuration.Ref().ref();
+  }
 
   // Recomputes the canonical duration from various sources.
   void RecomputeDuration();
 
 
   // FrameID which increments every time a frame is pushed to our queue.
   FrameID mCurrentFrameID;
 
@@ -510,18 +517,18 @@ private:
   // Returns true if we're logically playing, that is, if the Play() has
   // been called and Pause() has not or we have not yet reached the end
   // of media. This is irrespective of the seeking state; if the owner
   // calls Play() and then Seek(), we still count as logically playing.
   // The decoder monitor must be held.
   bool IsLogicallyPlaying()
   {
     MOZ_ASSERT(OnTaskQueue());
-    return mPlayState == MediaDecoder::PLAY_STATE_PLAYING ||
-           mNextPlayState == MediaDecoder::PLAY_STATE_PLAYING;
+    return mPlayState == MediaDecoder::PLAY_STATE_PLAYING
+           || mNextPlayState == MediaDecoder::PLAY_STATE_PLAYING;
   }
 
   // Media Fragment end time in microseconds. Access controlled by decoder monitor.
   int64_t mFragmentEndTime;
 
   // The media sink resource.  Used on the state machine thread.
   RefPtr<media::MediaSink> mMediaSink;
 
@@ -746,31 +753,34 @@ private:
   Canonical<int64_t> mPlaybackOffset;
 
   // Used to distinguish whether the audio is producing sound.
   Canonical<bool> mIsAudioDataAudible;
 
 public:
   AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() const;
 
-  AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration() {
+  AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration()
+  {
     return &mDuration;
   }
-  AbstractCanonical<bool>* CanonicalIsShutdown() {
-    return &mIsShutdown;
-  }
-  AbstractCanonical<NextFrameStatus>* CanonicalNextFrameStatus() {
+  AbstractCanonical<bool>* CanonicalIsShutdown() { return &mIsShutdown; }
+  AbstractCanonical<NextFrameStatus>* CanonicalNextFrameStatus()
+  {
     return &mNextFrameStatus;
   }
-  AbstractCanonical<int64_t>* CanonicalCurrentPosition() {
+  AbstractCanonical<int64_t>* CanonicalCurrentPosition()
+  {
     return &mCurrentPosition;
   }
-  AbstractCanonical<int64_t>* CanonicalPlaybackOffset() {
+  AbstractCanonical<int64_t>* CanonicalPlaybackOffset()
+  {
     return &mPlaybackOffset;
   }
-  AbstractCanonical<bool>* CanonicalIsAudioDataAudible() {
+  AbstractCanonical<bool>* CanonicalIsAudioDataAudible()
+  {
     return &mIsAudioDataAudible;
   }
 };
 
 } // namespace mozilla
 
 #endif
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -97,35 +97,34 @@ private:
   std::queue<RefPtr<PromisePrivate>> mPromises;
 };
 
 StaticMutex DecoderAllocPolicy::sMutex;
 
 class DecoderAllocPolicy::AutoDeallocToken : public Token
 {
 public:
-  explicit AutoDeallocToken(TrackType aTrack)
-    : mTrack(aTrack)
-  {}
+  explicit AutoDeallocToken(TrackType aTrack) : mTrack(aTrack) { }
 
 private:
   ~AutoDeallocToken()
   {
     DecoderAllocPolicy::Instance(mTrack).Dealloc();
   }
 
   const TrackType mTrack;
 };
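
AutoDeallocToken is an RAII permit: holding the token keeps a decoder slot claimed, and dropping the last reference returns the slot via the destructor. The idea in isolation (hypothetical Policy type):

    #include <cstdio>

    class PolicySketch
    {
    public:
      void Dealloc() { ++mFreeSlots; printf("%d slot(s) free\n", mFreeSlots); }
    private:
      int mFreeSlots = 0;
    };

    class AutoDeallocTokenSketch
    {
    public:
      explicit AutoDeallocTokenSketch(PolicySketch& aPolicy) : mPolicy(aPolicy) { }
      ~AutoDeallocTokenSketch() { mPolicy.Dealloc(); } // give the slot back
    private:
      PolicySketch& mPolicy;
    };
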
 
 DecoderAllocPolicy::DecoderAllocPolicy(TrackType aTrack)
   : mMonitor("DecoderAllocPolicy::mMonitor")
   , mDecoderLimit(MediaPrefs::MediaDecoderLimit())
   , mTrack(aTrack)
 {
-  // Non DocGroup-version AbstractThread::MainThread is fine for ClearOnShutdown().
+  // Non DocGroup-version AbstractThread::MainThread is fine for
+  // ClearOnShutdown().
   AbstractThread::MainThread()->Dispatch(NS_NewRunnableFunction([this] () {
     ClearOnShutdown(this, ShutdownPhase::ShutdownThreads);
   }));
 }
 
 DecoderAllocPolicy::~DecoderAllocPolicy()
 {
   while (!mPromises.empty()) {
@@ -192,23 +191,23 @@ DecoderAllocPolicy::operator=(std::nullp
 
 class MediaFormatReader::DecoderFactory
 {
   using InitPromise = MediaDataDecoder::InitPromise;
   using TokenPromise = DecoderAllocPolicy::Promise;
   using Token = DecoderAllocPolicy::Token;
 
 public:
-  explicit DecoderFactory(MediaFormatReader* aOwner) : mOwner(aOwner) {}
+  explicit DecoderFactory(MediaFormatReader* aOwner) : mOwner(aOwner) { }
   void CreateDecoder(TrackType aTrack);
   // Shutdown any decoder pending initialization.
   RefPtr<ShutdownPromise> ShutdownDecoder(TrackType aTrack)
   {
-    MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack ||
-               aTrack == TrackInfo::kVideoTrack);
+    MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack
+               || aTrack == TrackInfo::kVideoTrack);
     auto& data = aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo;
     data.mTokenRequest.DisconnectIfExists();
     data.mInitRequest.DisconnectIfExists();
     if (!data.mDecoder) {
       return ShutdownPromise::CreateAndResolve(true, __func__);
     }
     if (data.mShutdownRequest.Exists()) {
       // A shutdown is already in progress due to a prior initialization error,
@@ -248,18 +247,18 @@ private:
   void DoInitDecoder(TrackType aTrack);
 
   MediaFormatReader* const mOwner; // guaranteed to be valid by the owner.
 };
 
 void
 MediaFormatReader::DecoderFactory::CreateDecoder(TrackType aTrack)
 {
-  MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack ||
-             aTrack == TrackInfo::kVideoTrack);
+  MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack
+             || aTrack == TrackInfo::kVideoTrack);
   RunStage(aTrack);
 }
 
 class MediaFormatReader::DecoderFactory::Wrapper : public MediaDataDecoder
 {
   using Token = DecoderAllocPolicy::Token;
 
 public:
@@ -365,17 +364,18 @@ MediaFormatReader::DecoderFactory::RunSt
 
 MediaResult
 MediaFormatReader::DecoderFactory::DoCreateDecoder(TrackType aTrack)
 {
   auto& ownerData = mOwner->GetDecoderData(aTrack);
   auto& data = aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo;
 
   auto decoderCreatingError = "error creating audio decoder";
-  MediaResult result = MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, decoderCreatingError);
+  MediaResult result =
+    MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, decoderCreatingError);
 
   if (!mOwner->mPlatform) {
     mOwner->mPlatform = new PDMFactory();
     if (mOwner->IsEncrypted()) {
       MOZ_ASSERT(mOwner->mCDMProxy);
       mOwner->mPlatform->SetCDMProxy(mOwner->mCDMProxy);
     }
   }
@@ -470,20 +470,20 @@ MediaFormatReader::DecoderFactory::DoIni
 // This ensures that the reader's taskqueue will never be blocked while a
 // demuxer is itself blocked attempting to access the MediaCache or the
 // MediaResource.
 class MediaFormatReader::DemuxerProxy
 {
   using TrackType = TrackInfo::TrackType;
   class Wrapper;
 
 public:
-  explicit DemuxerProxy(MediaDataDemuxer* aDemuxer, AbstractThread* mainThread)
+  explicit DemuxerProxy(MediaDataDemuxer* aDemuxer, AbstractThread* aMainThread)
     : mTaskQueue(new AutoTaskQueue(
                    GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
-                   mainThread))
+                   aMainThread))
     , mData(new Data(aDemuxer))
   {
     MOZ_COUNT_CTOR(DemuxerProxy);
   }
 
   ~DemuxerProxy()
   {
     MOZ_COUNT_DTOR(DemuxerProxy);
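
The proxy forwards every potentially blocking demuxer call onto its own task queue, so the reader's queue waits on a promise instead of on the MediaResource. A one-shot stand-in using std::packaged_task (the real code uses AutoTaskQueue and MozPromise):

    #include <future>
    #include <thread>

    std::future<int> ProxiedGetNumberTracks()
    {
      std::packaged_task<int()> task([] {
        // Blocking demuxer work (MediaCache/MediaResource access) happens
        // here, off the caller's thread.
        return 2;
      });
      std::future<int> result = task.get_future();
      std::thread(std::move(task)).detach();
      return result;
    }
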
@@ -574,17 +574,18 @@ private:
   const RefPtr<AutoTaskQueue> mTaskQueue;
   struct Data
   {
     NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Data)
 
     explicit Data(MediaDataDemuxer* aDemuxer)
       : mInitDone(false)
       , mDemuxer(aDemuxer)
-    { }
+    {
+    }
 
     Atomic<bool> mInitDone;
     // Only ever accessed over mTaskQueue once.
     RefPtr<MediaDataDemuxer> mDemuxer;
     // Only accessed once InitPromise has been resolved and immutable after.
     // So we can safely access them without the use of the mutex.
     uint32_t mNumAudioTrack = 0;
     RefPtr<Wrapper> mAudioDemuxer;
@@ -604,17 +605,18 @@ class MediaFormatReader::DemuxerProxy::W
 {
 public:
   Wrapper(MediaTrackDemuxer* aTrackDemuxer, AutoTaskQueue* aTaskQueue)
     : mMutex("TrackDemuxer Mutex")
     , mTaskQueue(aTaskQueue)
     , mGetSamplesMayBlock(aTrackDemuxer->GetSamplesMayBlock())
     , mInfo(aTrackDemuxer->GetInfo())
     , mTrackDemuxer(aTrackDemuxer)
-  { }
+  {
+  }
 
   UniquePtr<TrackInfo> GetInfo() const override
   {
     if (!mInfo) {
       return nullptr;
     }
     return mInfo->Clone();
   }
@@ -1040,17 +1042,18 @@ public:
   DispatchKeyNeededEvent(AbstractMediaDecoder* aDecoder,
                          nsTArray<uint8_t>& aInitData,
                          const nsString& aInitDataType)
     : mDecoder(aDecoder)
     , mInitData(aInitData)
     , mInitDataType(aInitDataType)
   {
   }
-  NS_IMETHOD Run() override {
+  NS_IMETHOD Run() override
+  {
     // Note: Null check the owner, as the decoder could have been shutdown
     // since this event was dispatched.
     MediaDecoderOwner* owner = mDecoder->GetOwner();
     if (owner) {
       owner->DispatchEncrypted(mInitData, mInitDataType);
     }
     mDecoder = nullptr;
     return NS_OK;
@@ -1116,31 +1119,32 @@ MediaFormatReader::OnDemuxerInitDone(nsr
   UniquePtr<MetadataTags> tags(MakeUnique<MetadataTags>());
 
   RefPtr<PDMFactory> platform;
   if (!IsWaitingOnCDMResource()) {
     platform = new PDMFactory();
   }
 
   // To decode, we need valid video and a place to put it.
-  bool videoActive = !!mDemuxer->GetNumberTracks(TrackInfo::kVideoTrack) &&
-    GetImageContainer();
+  bool videoActive =
+    !!mDemuxer->GetNumberTracks(TrackInfo::kVideoTrack) && GetImageContainer();
 
   if (videoActive) {
     // We currently only handle the first video track.
     mVideo.mTrackDemuxer = mDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0);
     if (!mVideo.mTrackDemuxer) {
       mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
       return;
     }
 
     UniquePtr<TrackInfo> videoInfo = mVideo.mTrackDemuxer->GetInfo();
     videoActive = videoInfo && videoInfo->IsValid();
     if (videoActive) {
-      if (platform && !platform->SupportsMimeType(videoInfo->mMimeType, nullptr)) {
+      if (platform
+          && !platform->SupportsMimeType(videoInfo->mMimeType, nullptr)) {
         // We have no decoder for this track. Error.
         mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
         return;
       }
       mInfo.mVideo = *videoInfo->GetAsVideoInfo();
       for (const MetadataTag& tag : videoInfo->mTags) {
         tags->Put(tag.mKey, tag.mValue);
       }
@@ -1157,19 +1161,21 @@ MediaFormatReader::OnDemuxerInitDone(nsr
     mAudio.mTrackDemuxer = mDemuxer->GetTrackDemuxer(TrackInfo::kAudioTrack, 0);
     if (!mAudio.mTrackDemuxer) {
       mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
       return;
     }
 
     UniquePtr<TrackInfo> audioInfo = mAudio.mTrackDemuxer->GetInfo();
     // We actively ignore audio tracks that we know we can't play.
-    audioActive = audioInfo && audioInfo->IsValid() &&
-                  (!platform ||
-                   platform->SupportsMimeType(audioInfo->mMimeType, nullptr));
+    audioActive =
+      audioInfo
+      && audioInfo->IsValid()
+      && (!platform || platform->SupportsMimeType(audioInfo->mMimeType,
+                                                  nullptr));
 
     if (audioActive) {
       mInfo.mAudio = *audioInfo->GetAsAudioInfo();
       for (const MetadataTag& tag : audioInfo->mTags) {
         tags->Put(tag.mKey, tag.mValue);
       }
       mAudio.mOriginalInfo = Move(audioInfo);
       mTrackDemuxersMayBlock |= mAudio.mTrackDemuxer->GetSamplesMayBlock();
@@ -1179,17 +1185,18 @@ MediaFormatReader::OnDemuxerInitDone(nsr
     }
   }
 
   UniquePtr<EncryptionInfo> crypto = mDemuxer->GetCrypto();
   if (mDecoder && crypto && crypto->IsEncrypted()) {
     // Try and dispatch 'encrypted'. Won't go if ready state still HAVE_NOTHING.
     for (uint32_t i = 0; i < crypto->mInitDatas.Length(); i++) {
       NS_DispatchToMainThread(
-        new DispatchKeyNeededEvent(mDecoder, crypto->mInitDatas[i].mInitData, crypto->mInitDatas[i].mType));
+        new DispatchKeyNeededEvent(mDecoder, crypto->mInitDatas[i].mInitData,
+                                   crypto->mInitDatas[i].mType));
     }
     mInfo.mCrypto = *crypto;
   }
 
   int64_t videoDuration = HasVideo() ? mInfo.mVideo.mDuration : 0;
   int64_t audioDuration = HasAudio() ? mInfo.mAudio.mDuration : 0;
 
   int64_t duration = std::max(videoDuration, audioDuration);
@@ -1229,18 +1236,18 @@ MediaFormatReader::OnDemuxerInitDone(nsr
   MaybeResolveMetadataPromise();
 }
 
 void
 MediaFormatReader::MaybeResolveMetadataPromise()
 {
   MOZ_ASSERT(OnTaskQueue());
 
-  if ((HasAudio() && mAudio.mFirstDemuxedSampleTime.isNothing()) ||
-      (HasVideo() && mVideo.mFirstDemuxedSampleTime.isNothing())) {
+  if ((HasAudio() && mAudio.mFirstDemuxedSampleTime.isNothing())
+      || (HasVideo() && mVideo.mFirstDemuxedSampleTime.isNothing())) {
     return;
   }
 
   TimeUnit startTime =
     std::min(mAudio.mFirstDemuxedSampleTime.refOr(TimeUnit::FromInfinity()),
              mVideo.mFirstDemuxedSampleTime.refOr(TimeUnit::FromInfinity()));
 
   if (!startTime.IsInfinite()) {
@@ -1257,18 +1264,18 @@ MediaFormatReader::MaybeResolveMetadataP
   UpdateBuffered();
 
   mMetadataPromise.Resolve(metadata, __func__);
 }
 
 bool
 MediaFormatReader::IsEncrypted() const
 {
-  return (HasAudio() && mInfo.mAudio.mCrypto.mValid) ||
-         (HasVideo() && mInfo.mVideo.mCrypto.mValid);
+  return (HasAudio() && mInfo.mAudio.mCrypto.mValid)
+         || (HasVideo() && mInfo.mVideo.mCrypto.mValid);
 }
 
 void
 MediaFormatReader::OnDemuxerInitFailed(const MediaResult& aError)
 {
   mDemuxerInitRequest.Complete();
   mMetadataPromise.Reject(aError, __func__);
 }
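
In MaybeResolveMetadataPromise() above, a track with no demuxed sample yet contributes +infinity, so the min() picks whichever track actually produced the earliest sample; an infinite result means neither did. Sketch:

    #include <algorithm>
    #include <limits>

    double StartTime(bool aHaveAudio, double aAudioStart,
                     bool aHaveVideo, double aVideoStart)
    {
      const double inf = std::numeric_limits<double>::infinity();
      return std::min(aHaveAudio ? aAudioStart : inf,
                      aHaveVideo ? aVideoStart : inf);
    }
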
@@ -1277,71 +1284,78 @@ void
 MediaFormatReader::ReadUpdatedMetadata(MediaInfo* aInfo)
 {
   *aInfo = mInfo;
 }
 
 MediaFormatReader::DecoderData&
 MediaFormatReader::GetDecoderData(TrackType aTrack)
 {
-  MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack ||
-             aTrack == TrackInfo::kVideoTrack);
+  MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack
+             || aTrack == TrackInfo::kVideoTrack);
   if (aTrack == TrackInfo::kAudioTrack) {
     return mAudio;
   }
   return mVideo;
 }
 
 bool
-MediaFormatReader::ShouldSkip(bool aSkipToNextKeyframe, media::TimeUnit aTimeThreshold)
+MediaFormatReader::ShouldSkip(bool aSkipToNextKeyframe,
+                              media::TimeUnit aTimeThreshold)
 {
   MOZ_ASSERT(HasVideo());
   media::TimeUnit nextKeyframe;
   nsresult rv = mVideo.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe);
   if (NS_FAILED(rv)) {
     return aSkipToNextKeyframe;
   }
   return (nextKeyframe < aTimeThreshold ||
-          (mVideo.mTimeThreshold &&
-           mVideo.mTimeThreshold.ref().EndTime() < aTimeThreshold)) &&
-         nextKeyframe.ToMicroseconds() >= 0 && !nextKeyframe.IsInfinite();
+          (mVideo.mTimeThreshold
+           && mVideo.mTimeThreshold.ref().EndTime() < aTimeThreshold))
+         && nextKeyframe.ToMicroseconds() >= 0
+         && !nextKeyframe.IsInfinite();
 }
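
The reformatted predicate reads: skip only when the next keyframe (or the end of a pending internal seek) still lies before the decode target, and the reported keyframe time is sane. As a standalone boolean, with double-valued times for brevity:

    #include <cmath>

    bool ShouldSkipToKeyframe(double aNextKeyframe, double aThreshold,
                              bool aHaveInternalTarget, double aTargetEnd)
    {
      bool behindTarget = aNextKeyframe < aThreshold
                          || (aHaveInternalTarget && aTargetEnd < aThreshold);
      return behindTarget && aNextKeyframe >= 0 && std::isfinite(aNextKeyframe);
    }
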
 
 RefPtr<MediaDecoderReader::MediaDataPromise>
 MediaFormatReader::RequestVideoData(bool aSkipToNextKeyframe,
                                     int64_t aTimeThreshold)
 {
   MOZ_ASSERT(OnTaskQueue());
-  MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(), "No sample requests allowed while seeking");
+  MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(),
+                        "No sample requests allowed while seeking");
   MOZ_DIAGNOSTIC_ASSERT(!mVideo.HasPromise(), "No duplicate sample requests");
-  MOZ_DIAGNOSTIC_ASSERT(!mVideo.mSeekRequest.Exists() ||
-                        mVideo.mTimeThreshold.isSome());
+  MOZ_DIAGNOSTIC_ASSERT(!mVideo.mSeekRequest.Exists()
+                        || mVideo.mTimeThreshold.isSome());
   MOZ_DIAGNOSTIC_ASSERT(!IsSeeking(), "called mid-seek");
   LOGV("RequestVideoData(%d, %lld)", aSkipToNextKeyframe, aTimeThreshold);
 
   if (!HasVideo()) {
     LOG("called with no video track");
-    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                                             __func__);
   }
 
   if (IsSeeking()) {
     LOG("called mid-seek. Rejecting.");
-    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
+                                             __func__);
   }
 
   if (mShutdown) {
     NS_WARNING("RequestVideoData on shutdown MediaFormatReader!");
-    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
+                                             __func__);
   }
 
-  media::TimeUnit timeThreshold{media::TimeUnit::FromMicroseconds(aTimeThreshold)};
+  media::TimeUnit timeThreshold{ media::TimeUnit::FromMicroseconds(
+    aTimeThreshold) };
   // Ensure no seek is pending, as ShouldSkip could otherwise return
   // out-of-date information.
-  if (!mVideo.HasInternalSeekPending() &&
-      ShouldSkip(aSkipToNextKeyframe, timeThreshold)) {
+  if (!mVideo.HasInternalSeekPending()
+      && ShouldSkip(aSkipToNextKeyframe, timeThreshold)) {
     RefPtr<MediaDataPromise> p = mVideo.EnsurePromise(__func__);
     SkipVideoDemuxToNextKeyFrame(timeThreshold);
     return p;
   }
 
   RefPtr<MediaDataPromise> p = mVideo.EnsurePromise(__func__);
   ScheduleUpdate(TrackInfo::kVideoTrack);
 
@@ -1398,52 +1412,58 @@ MediaFormatReader::DoDemuxVideo()
 
   p->Then(OwnerThread(), __func__, this,
           &MediaFormatReader::OnVideoDemuxCompleted,
           &MediaFormatReader::OnVideoDemuxFailed)
    ->Track(mVideo.mDemuxRequest);
 }
 
 void
-MediaFormatReader::OnVideoDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
+MediaFormatReader::OnVideoDemuxCompleted(
+  RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
 {
   LOGV("%d video samples demuxed (sid:%d)",
        aSamples->mSamples.Length(),
-       aSamples->mSamples[0]->mTrackInfo ? aSamples->mSamples[0]->mTrackInfo->GetID() : 0);
+       aSamples->mSamples[0]->mTrackInfo
+       ? aSamples->mSamples[0]->mTrackInfo->GetID()
+       : 0);
   mVideo.mDemuxRequest.Complete();
   mVideo.mQueuedSamples.AppendElements(aSamples->mSamples);
   ScheduleUpdate(TrackInfo::kVideoTrack);
 }
 
 RefPtr<MediaDecoderReader::MediaDataPromise>
 MediaFormatReader::RequestAudioData()
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_DIAGNOSTIC_ASSERT(!mAudio.HasPromise(), "No duplicate sample requests");
   MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() || mSeekPromise.IsEmpty(),
                         "No sample requests allowed while seeking");
-  MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() ||
-                        !mAudio.mSeekRequest.Exists() ||
-                        mAudio.mTimeThreshold.isSome());
+  MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking()
+                        || !mAudio.mSeekRequest.Exists()
+                        || mAudio.mTimeThreshold.isSome());
   MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() || !IsSeeking(), "called mid-seek");
   LOGV("");
 
   if (!HasAudio()) {
     LOG("called with no audio track");
-    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                                             __func__);
   }
 
   if (IsSeeking()) {
     LOG("called mid-seek. Rejecting.");
-    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
+                                             __func__);
   }
 
   if (mShutdown) {
     NS_WARNING("RequestAudioData on shutdown MediaFormatReader!");
-    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
+                                             __func__);
   }
 
   RefPtr<MediaDataPromise> p = mAudio.EnsurePromise(__func__);
   ScheduleUpdate(TrackInfo::kAudioTrack);
 
   return p;
 }
 
@@ -1465,29 +1485,32 @@ MediaFormatReader::DoDemuxAudio()
 
   p->Then(OwnerThread(), __func__, this,
           &MediaFormatReader::OnAudioDemuxCompleted,
           &MediaFormatReader::OnAudioDemuxFailed)
    ->Track(mAudio.mDemuxRequest);
 }
 
 void
-MediaFormatReader::OnAudioDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
+MediaFormatReader::OnAudioDemuxCompleted(
+  RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
 {
   LOGV("%d audio samples demuxed (sid:%d)",
        aSamples->mSamples.Length(),
-       aSamples->mSamples[0]->mTrackInfo ? aSamples->mSamples[0]->mTrackInfo->GetID() : 0);
+       aSamples->mSamples[0]->mTrackInfo
+       ? aSamples->mSamples[0]->mTrackInfo->GetID()
+       : 0);
   mAudio.mDemuxRequest.Complete();
   mAudio.mQueuedSamples.AppendElements(aSamples->mSamples);
   ScheduleUpdate(TrackInfo::kAudioTrack);
 }
 
 void
-MediaFormatReader::NotifyNewOutput(TrackType aTrack,
-                                   const MediaDataDecoder::DecodedData& aResults)
+MediaFormatReader::NotifyNewOutput(
+  TrackType aTrack, const MediaDataDecoder::DecodedData& aResults)
 {
   MOZ_ASSERT(OnTaskQueue());
   auto& decoder = GetDecoderData(aTrack);
   for (auto& sample : aResults) {
     LOGV("Received new %s sample time:%lld duration:%lld",
         TrackTypeToStr(aTrack), sample->mTime, sample->mDuration);
     decoder.mOutput.AppendElement(sample);
     decoder.mNumSamplesOutput++;
@@ -1555,23 +1578,23 @@ MediaFormatReader::NotifyEndOfStream(Tra
 }
 
 bool
 MediaFormatReader::NeedInput(DecoderData& aDecoder)
 {
   // The decoder will not be fed a new raw sample until the current decoding
   // request has completed.
   return
-    (aDecoder.HasPromise() || aDecoder.mTimeThreshold.isSome()) &&
-    !aDecoder.HasPendingDrain() &&
-    !aDecoder.HasFatalError() &&
-    !aDecoder.mDemuxRequest.Exists() &&
-    !aDecoder.mOutput.Length() &&
-    !aDecoder.HasInternalSeekPending() &&
-    !aDecoder.mDecodeRequest.Exists();
+    (aDecoder.HasPromise() || aDecoder.mTimeThreshold.isSome())
+    && !aDecoder.HasPendingDrain()
+    && !aDecoder.HasFatalError()
+    && !aDecoder.mDemuxRequest.Exists()
+    && !aDecoder.mOutput.Length()
+    && !aDecoder.HasInternalSeekPending()
+    && !aDecoder.mDecodeRequest.Exists();
 }
 
 void
 MediaFormatReader::ScheduleUpdate(TrackType aTrack)
 {
   MOZ_ASSERT(OnTaskQueue());
   if (mShutdown) {
     return;
@@ -1629,27 +1652,28 @@ MediaFormatReader::UpdateReceivedNewData
     decoder.mTimeThreshold.ref().mWaiting = false;
   }
   decoder.mWaitingForData = false;
 
   if (decoder.HasFatalError()) {
     return false;
   }
 
-  if (!mSeekPromise.IsEmpty() &&
-      (!IsVideoSeeking() || aTrack == TrackInfo::kVideoTrack)) {
+  if (!mSeekPromise.IsEmpty()
+      && (!IsVideoSeeking() || aTrack == TrackInfo::kVideoTrack)) {
     MOZ_ASSERT(!decoder.HasPromise());
-    MOZ_DIAGNOSTIC_ASSERT((IsVideoSeeking() || !mAudio.mTimeThreshold) &&
-                          !mVideo.mTimeThreshold,
-                          "InternalSeek must have been aborted when Seek was first called");
-    MOZ_DIAGNOSTIC_ASSERT((IsVideoSeeking() || !mAudio.HasWaitingPromise()) &&
-                          !mVideo.HasWaitingPromise(),
-                          "Waiting promises must have been rejected when Seek was first called");
-    if (mVideo.mSeekRequest.Exists() ||
-        (!IsVideoSeeking() && mAudio.mSeekRequest.Exists())) {
+    MOZ_DIAGNOSTIC_ASSERT(
+      (IsVideoSeeking() || !mAudio.mTimeThreshold) && !mVideo.mTimeThreshold,
+      "InternalSeek must have been aborted when Seek was first called");
+    MOZ_DIAGNOSTIC_ASSERT(
+      (IsVideoSeeking() || !mAudio.HasWaitingPromise())
+      && !mVideo.HasWaitingPromise(),
+      "Waiting promises must have been rejected when Seek was first called");
+    if (mVideo.mSeekRequest.Exists()
+        || (!IsVideoSeeking() && mAudio.mSeekRequest.Exists())) {
       // Already waiting for a seek to complete. Nothing more to do.
       return true;
     }
     LOG("Attempting Seek");
     ScheduleSeek();
     return true;
   }
   if (decoder.HasInternalSeekPending() || decoder.HasWaitingPromise()) {
@@ -1712,18 +1736,18 @@ MediaFormatReader::DecodeDemuxedSamples(
            [self, this, aTrack, &decoder](const MediaResult& aError) {
              decoder.mDecodeRequest.Complete();
              NotifyError(aTrack, aError);
            })
     ->Track(decoder.mDecodeRequest);
 }
 
 void
-MediaFormatReader::HandleDemuxedSamples(TrackType aTrack,
-                                        AbstractMediaDecoder::AutoNotifyDecoded& aA)
+MediaFormatReader::HandleDemuxedSamples(
+  TrackType aTrack, AbstractMediaDecoder::AutoNotifyDecoded& aA)
 {
   MOZ_ASSERT(OnTaskQueue());
 
   auto& decoder = GetDecoderData(aTrack);
 
   if (decoder.mFlushRequest.Exists() || decoder.mShutdownRequest.Exists()) {
     LOGV("Decoder operation in progress, let it complete.");
     return;
@@ -1741,18 +1765,18 @@ MediaFormatReader::HandleDemuxedSamples(
   LOGV("Giving %s input to decoder", TrackTypeToStr(aTrack));
 
   // Decode all our demuxed frames.
   while (decoder.mQueuedSamples.Length()) {
     RefPtr<MediaRawData> sample = decoder.mQueuedSamples[0];
     RefPtr<SharedTrackInfo> info = sample->mTrackInfo;
 
     if (info && decoder.mLastStreamSourceID != info->GetID()) {
-      bool supportRecycling = MediaPrefs::MediaDecoderCheckRecycling() &&
-                              decoder.mDecoder->SupportDecoderRecycling();
+      bool supportRecycling = MediaPrefs::MediaDecoderCheckRecycling()
+                              && decoder.mDecoder->SupportDecoderRecycling();
       if (decoder.mNextStreamSourceID.isNothing() ||
           decoder.mNextStreamSourceID.ref() != info->GetID()) {
         if (!supportRecycling) {
           LOG("%s stream id has changed from:%d to:%d, draining decoder.",
             TrackTypeToStr(aTrack), decoder.mLastStreamSourceID,
             info->GetID());
           decoder.mNeedDraining = true;
           decoder.mNextStreamSourceID = Some(info->GetID());
@@ -1805,34 +1829,36 @@ MediaFormatReader::HandleDemuxedSamples(
     DecodeDemuxedSamples(aTrack, sample);
 
     decoder.mQueuedSamples.RemoveElementAt(0);
     break;
   }
 }
 
 void
-MediaFormatReader::InternalSeek(TrackType aTrack, const InternalSeekTarget& aTarget)
+MediaFormatReader::InternalSeek(TrackType aTrack,
+                                const InternalSeekTarget& aTarget)
 {
   MOZ_ASSERT(OnTaskQueue());
   LOG("%s internal seek to %f",
       TrackTypeToStr(aTrack), aTarget.Time().ToSeconds());
 
   auto& decoder = GetDecoderData(aTrack);
   decoder.Flush();
   decoder.ResetDemuxer();
   decoder.mTimeThreshold = Some(aTarget);
   RefPtr<MediaFormatReader> self = this;
   decoder.mTrackDemuxer->Seek(decoder.mTimeThreshold.ref().Time())
     ->Then(OwnerThread(), __func__,
            [self, aTrack] (media::TimeUnit aTime) {
              auto& decoder = self->GetDecoderData(aTrack);
              decoder.mSeekRequest.Complete();
-             MOZ_ASSERT(decoder.mTimeThreshold,
-                        "Seek promise must be disconnected when timethreshold is reset");
+             MOZ_ASSERT(
+               decoder.mTimeThreshold,
+               "Seek promise must be disconnected when timethreshold is reset");
              decoder.mTimeThreshold.ref().mHasSeeked = true;
              self->SetVideoDecodeThreshold();
              self->ScheduleUpdate(aTrack);
            },
            [self, aTrack] (const MediaResult& aError) {
              auto& decoder = self->GetDecoderData(aTrack);
              decoder.mSeekRequest.Complete();
              switch (aError.Code()) {
@@ -1919,20 +1945,20 @@ MediaFormatReader::Update(TrackType aTra
     return;
   }
 
   if (decoder.mSeekRequest.Exists()) {
     LOGV("Seeking hasn't completed, nothing more to do");
     return;
   }
 
-  MOZ_DIAGNOSTIC_ASSERT(!decoder.HasInternalSeekPending() ||
-                        (!decoder.mOutput.Length() &&
-                         !decoder.mQueuedSamples.Length()),
-                        "No frames can be demuxed or decoded while an internal seek is pending");
+  MOZ_DIAGNOSTIC_ASSERT(
+    !decoder.HasInternalSeekPending()
+    || (!decoder.mOutput.Length() && !decoder.mQueuedSamples.Length()),
+    "No frames can be demuxed or decoded while an internal seek is pending");
 
   // Record number of frames decoded and parsed. Automatically update the
   // stats counters using the AutoNotifyDecoded stack-based class.
   AbstractMediaDecoder::AutoNotifyDecoded a(mDecoder);
 
   // Drop any frames found prior to our internal seek target.
   while (decoder.mTimeThreshold && decoder.mOutput.Length()) {
     RefPtr<MediaData>& output = decoder.mOutput[0];
@@ -1950,17 +1976,18 @@ MediaFormatReader::Update(TrackType aTra
            media::TimeUnit::FromMicroseconds(output->mTime).ToSeconds(),
            target.Time().ToSeconds(),
            output->mKeyframe);
       decoder.mOutput.RemoveElementAt(0);
       decoder.mSizeOfQueue -= 1;
     }
   }
 
-  while (decoder.mOutput.Length() && decoder.mOutput[0]->mType == MediaData::NULL_DATA) {
+  while (decoder.mOutput.Length()
+         && decoder.mOutput[0]->mType == MediaData::NULL_DATA) {
     LOGV("Dropping null data. Time: %lld", decoder.mOutput[0]->mTime);
     decoder.mOutput.RemoveElementAt(0);
     decoder.mSizeOfQueue -= 1;
   }
 
   if (decoder.HasPromise()) {
     needOutput = true;
     if (decoder.mOutput.Length()) {
@@ -1976,17 +2003,18 @@ MediaFormatReader::Update(TrackType aTra
       if (aTrack == TrackType::kVideoTrack) {
         uint64_t delta =
           decoder.mNumSamplesOutputTotal - mLastReportedNumDecodedFrames;
         a.mStats.mDecodedFrames = static_cast<uint32_t>(delta);
         mLastReportedNumDecodedFrames = decoder.mNumSamplesOutputTotal;
         if (output->mKeyframe) {
           if (mPreviousDecodedKeyframeTime_us < output->mTime) {
             // There is a previous keyframe -> Record inter-keyframe stats.
-            uint64_t segment_us = output->mTime - mPreviousDecodedKeyframeTime_us;
+            uint64_t segment_us =
+              output->mTime - mPreviousDecodedKeyframeTime_us;
             a.mStats.mInterKeyframeSum_us += segment_us;
             a.mStats.mInterKeyframeCount += 1;
             if (a.mStats.mInterKeyFrameMax_us < segment_us) {
               a.mStats.mInterKeyFrameMax_us = segment_us;
             }
           }
           mPreviousDecodedKeyframeTime_us = output->mTime;
         }
@@ -2008,17 +2036,18 @@ MediaFormatReader::Update(TrackType aTra
       } else if (decoder.mWaitingForData) {
         if (wasDraining && decoder.mLastSampleTime &&
             !decoder.mNextStreamSourceID) {
           // We have completed draining the decoder following WaitingForData.
           // Set up the internal seek machinery to be able to resume from the
           // last sample decoded.
           LOG("Seeking to last sample time: %lld",
               decoder.mLastSampleTime.ref().mStart.ToMicroseconds());
-          InternalSeek(aTrack, InternalSeekTarget(decoder.mLastSampleTime.ref(), true));
+          InternalSeek(aTrack,
+                       InternalSeekTarget(decoder.mLastSampleTime.ref(), true));
         }
         if (!decoder.mReceivedNewData) {
           LOG("Rejecting %s promise: WAITING_FOR_DATA", TrackTypeToStr(aTrack));
           decoder.RejectPromise(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, __func__);
         }
       }
       // Now that draining has completed, we check if we have received
       // new data again as the result may now be different from the earlier
@@ -2043,31 +2072,35 @@ MediaFormatReader::Update(TrackType aTra
   }
 
   if (decoder.mNeedDraining) {
     DrainDecoder(aTrack);
     return;
   }
 
   if (decoder.mError && !decoder.HasFatalError()) {
-    bool needsNewDecoder = decoder.mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER;
-    if (!needsNewDecoder && ++decoder.mNumOfConsecutiveError > decoder.mMaxConsecutiveError) {
+    bool needsNewDecoder =
+      decoder.mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER;
+    if (!needsNewDecoder
+        && ++decoder.mNumOfConsecutiveError > decoder.mMaxConsecutiveError) {
       NotifyError(aTrack, decoder.mError.ref());
       return;
     }
     decoder.mError.reset();
     LOG("%s decoded error count %d", TrackTypeToStr(aTrack),
                                      decoder.mNumOfConsecutiveError);
     media::TimeUnit nextKeyframe;
-    if (aTrack == TrackType::kVideoTrack && !decoder.HasInternalSeekPending() &&
-        NS_SUCCEEDED(decoder.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe))) {
+    if (aTrack == TrackType::kVideoTrack && !decoder.HasInternalSeekPending()
+        && NS_SUCCEEDED(
+          decoder.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe))) {
       if (needsNewDecoder) {
         ShutdownDecoder(aTrack);
       }
-      SkipVideoDemuxToNextKeyFrame(decoder.mLastSampleTime.refOr(TimeInterval()).Length());
+      SkipVideoDemuxToNextKeyFrame(
+        decoder.mLastSampleTime.refOr(TimeInterval()).Length());
     } else if (aTrack == TrackType::kAudioTrack) {
       decoder.Flush();
     }
     return;
   }
 
   bool needInput = NeedInput(decoder);
 
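The error path above distinguishes NEED_NEW_DECODER, which always recreates the decoder, from ordinary decode errors, which are retried until the consecutive-error budget runs out. A sketch of just that policy:

    #include <cstdint>

    enum class DecodeErr { NeedNewDecoder, Other };

    // Returns true once the error should be treated as fatal.
    bool ShouldGiveUp(DecodeErr aError, uint32_t& aConsecutive, uint32_t aMax)
    {
      if (aError == DecodeErr::NeedNewDecoder) {
        return false; // recreate the decoder and keep decoding
      }
      return ++aConsecutive > aMax;
    }
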
@@ -2307,17 +2340,18 @@ MediaFormatReader::OnVideoSkipCompleted(
   mSkipRequest.Complete();
 
   VideoSkipReset(aSkipped);
 
   ScheduleUpdate(TrackInfo::kVideoTrack);
 }
 
 void
-MediaFormatReader::OnVideoSkipFailed(MediaTrackDemuxer::SkipFailureHolder aFailure)
+MediaFormatReader::OnVideoSkipFailed(
+  MediaTrackDemuxer::SkipFailureHolder aFailure)
 {
   MOZ_ASSERT(OnTaskQueue());
   LOG("Skipping failed, skipped %u frames", aFailure.mSkipped);
   mSkipRequest.Complete();
 
   switch (aFailure.mFailure.Code()) {
     case NS_ERROR_DOM_MEDIA_END_OF_STREAM:
     case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
@@ -2346,17 +2380,18 @@ MediaFormatReader::Seek(const SeekTarget
 
   LOG("aTarget=(%lld)", aTarget.GetTime().ToMicroseconds());
 
   MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty());
   MOZ_DIAGNOSTIC_ASSERT(!mVideo.HasPromise());
   MOZ_DIAGNOSTIC_ASSERT(aTarget.IsVideoOnly() || !mAudio.HasPromise());
   MOZ_DIAGNOSTIC_ASSERT(mPendingSeekTime.isNothing());
   MOZ_DIAGNOSTIC_ASSERT(mVideo.mTimeThreshold.isNothing());
-  MOZ_DIAGNOSTIC_ASSERT(aTarget.IsVideoOnly() || mAudio.mTimeThreshold.isNothing());
+  MOZ_DIAGNOSTIC_ASSERT(aTarget.IsVideoOnly()
+                        || mAudio.mTimeThreshold.isNothing());
 
   if (!mInfo.mMediaSeekable && !mInfo.mMediaSeekableOnlyInBufferedRanges) {
     LOG("Seek() END (Unseekable)");
     return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
   }
 
   if (mShutdown) {
     return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
@@ -2382,17 +2417,18 @@ MediaFormatReader::SetSeekTarget(const S
 
 void
 MediaFormatReader::ScheduleSeek()
 {
   if (mSeekScheduled) {
     return;
   }
   mSeekScheduled = true;
-  OwnerThread()->Dispatch(NewRunnableMethod(this, &MediaFormatReader::AttemptSeek));
+  OwnerThread()->Dispatch(
+    NewRunnableMethod(this, &MediaFormatReader::AttemptSeek));
 }
 
 void
 MediaFormatReader::AttemptSeek()
 {
   MOZ_ASSERT(OnTaskQueue());
 
   mSeekScheduled = false;
@@ -2430,19 +2466,20 @@ MediaFormatReader::OnSeekFailed(TrackTyp
   LOGV("%s failure:%u", TrackTypeToStr(aTrack), aError.Code());
   if (aTrack == TrackType::kVideoTrack) {
     mVideo.mSeekRequest.Complete();
   } else {
     mAudio.mSeekRequest.Complete();
   }
 
   if (aError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) {
-    if (HasVideo() && aTrack == TrackType::kAudioTrack &&
-        mFallbackSeekTime.isSome() &&
-        mPendingSeekTime.ref() != mFallbackSeekTime.ref()) {
+    if (HasVideo()
+        && aTrack == TrackType::kAudioTrack
+        && mFallbackSeekTime.isSome()
+        && mPendingSeekTime.ref() != mFallbackSeekTime.ref()) {
       // We have failed to seek audio to where video seeked earlier.
       // Attempt to seek instead to the closest point that we know we have in
       // order to limit A/V sync discrepancy.
 
       // Ensure we have the most up to date buffered ranges.
       UpdateReceivedNewData(TrackType::kAudioTrack);
       Maybe<media::TimeUnit> nextSeekTime;
       // Find closest buffered time found after video seeked time.
@@ -2463,18 +2500,18 @@ MediaFormatReader::OnSeekFailed(TrackTyp
       DoAudioSeek();
       return;
     }
     NotifyWaitingForData(aTrack);
   }
   MOZ_ASSERT(!mVideo.mSeekRequest.Exists() && !mAudio.mSeekRequest.Exists());
   mPendingSeekTime.reset();
 
-  auto type = aTrack == TrackType::kAudioTrack
-    ? MediaData::AUDIO_DATA : MediaData::VIDEO_DATA;
+  auto type = aTrack == TrackType::kAudioTrack ? MediaData::AUDIO_DATA
+                                               : MediaData::VIDEO_DATA;
   mSeekPromise.Reject(SeekRejectValue(type, aError), __func__);
 }
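
The audio fallback above re-seeks to the closest buffered audio start at or after the time video landed on, bounding the A/V desync. The search in isolation (hypothetical Range type):

    #include <vector>

    struct Range { double mStart; double mEnd; };

    bool ClosestBufferedStart(const std::vector<Range>& aBuffered,
                              double aVideoTime, double* aOut)
    {
      bool found = false;
      for (const Range& r : aBuffered) {
        if (r.mStart >= aVideoTime && (!found || r.mStart < *aOut)) {
          *aOut = r.mStart;
          found = true;
        }
      }
      return found;
    }
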
 
 void
 MediaFormatReader::DoVideoSeek()
 {
   MOZ_ASSERT(mPendingSeekTime.isSome());
   LOGV("Seeking video to %lld", mPendingSeekTime.ref().ToMicroseconds());
@@ -2622,18 +2659,19 @@ MediaFormatReader::NotifyTrackDemuxers()
   }
 }
 
 void
 MediaFormatReader::NotifyDataArrived()
 {
   MOZ_ASSERT(OnTaskQueue());
 
-  if (mShutdown || !mDemuxer ||
-      (!mDemuxerInitDone && !mDemuxerInitRequest.Exists())) {
+  if (mShutdown
+      || !mDemuxer
+      || (!mDemuxerInitDone && !mDemuxerInitRequest.Exists())) {
     return;
   }
 
   if (mNotifyDataArrivedPromise.Exists()) {
     // Already one in progress. Reschedule for later.
     RefPtr<nsIRunnable> task(
         NewRunnableMethod(this, &MediaFormatReader::NotifyDataArrived));
     OwnerThread()->Dispatch(task.forget());
@@ -2712,18 +2750,18 @@ MediaFormatReader::UpdateBuffered()
     mBuffered =
       intervals.Shift(media::TimeUnit() - mInfo.mStartTime);
   }
 }
 
 layers::ImageContainer*
 MediaFormatReader::GetImageContainer()
 {
-  return mVideoFrameContainer
-    ? mVideoFrameContainer->GetImageContainer() : nullptr;
+  return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer()
+                              : nullptr;
 }
 
 void
 MediaFormatReader::GetMozDebugReaderData(nsACString& aString)
 {
   nsAutoCString result;
   const char* audioName = "unavailable";
   const char* videoName = audioName;
@@ -2736,54 +2774,47 @@ MediaFormatReader::GetMozDebugReaderData
     MutexAutoLock mon(mVideo.mMutex);
     videoName = mVideo.mDescription;
   }
 
   result += nsPrintfCString("audio decoder: %s\n", audioName);
   result += nsPrintfCString("audio frames decoded: %lld\n",
                             mAudio.mNumSamplesOutputTotal);
   if (HasAudio()) {
-    result += nsPrintfCString("audio state: ni=%d no=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
-                              NeedInput(mAudio), mAudio.HasPromise(),
-                              mAudio.mDemuxRequest.Exists(),
-                              int(mAudio.mQueuedSamples.Length()),
-                              mAudio.mTimeThreshold
-                              ? mAudio.mTimeThreshold.ref().Time().ToSeconds()
-                              : -1.0,
-                              mAudio.mTimeThreshold
-                              ? mAudio.mTimeThreshold.ref().mHasSeeked
-                              : -1,
-                              mAudio.mNumSamplesInput, mAudio.mNumSamplesOutput,
-                              unsigned(size_t(mAudio.mSizeOfQueue)),
-                              unsigned(mAudio.mOutput.Length()),
-                              mAudio.mWaitingForData,
-                              mAudio.mLastStreamSourceID);
+    result += nsPrintfCString(
+      "audio state: ni=%d no=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu "
+      "out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
+      NeedInput(mAudio), mAudio.HasPromise(), mAudio.mDemuxRequest.Exists(),
+      int(mAudio.mQueuedSamples.Length()),
+      mAudio.mTimeThreshold ? mAudio.mTimeThreshold.ref().Time().ToSeconds()
+                            : -1.0,
+      mAudio.mTimeThreshold ? mAudio.mTimeThreshold.ref().mHasSeeked : -1,
+      mAudio.mNumSamplesInput, mAudio.mNumSamplesOutput,
+      unsigned(size_t(mAudio.mSizeOfQueue)), unsigned(mAudio.mOutput.Length()),
+      mAudio.mWaitingForData, mAudio.mLastStreamSourceID);
   }
   result += nsPrintfCString("video decoder: %s\n", videoName);
-  result += nsPrintfCString("hardware video decoding: %s\n",
-                            VideoIsHardwareAccelerated() ? "enabled" : "disabled");
+  result +=
+    nsPrintfCString("hardware video decoding: %s\n",
+                    VideoIsHardwareAccelerated() ? "enabled" : "disabled");
   result += nsPrintfCString("video frames decoded: %lld (skipped:%lld)\n",
                             mVideo.mNumSamplesOutputTotal,
                             mVideo.mNumSamplesSkippedTotal);
   if (HasVideo()) {
-    result += nsPrintfCString("video state: ni=%d no=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
-                              NeedInput(mVideo), mVideo.HasPromise(),
-                              mVideo.mDemuxRequest.Exists(),
-                              int(mVideo.mQueuedSamples.Length()),
-                              mVideo.mTimeThreshold
-                              ? mVideo.mTimeThreshold.ref().Time().ToSeconds()
-                              : -1.0,
-                              mVideo.mTimeThreshold
-                              ? mVideo.mTimeThreshold.ref().mHasSeeked
-                              : -1,
-                              mVideo.mNumSamplesInput, mVideo.mNumSamplesOutput,
-                              unsigned(size_t(mVideo.mSizeOfQueue)),
-                              unsigned(mVideo.mOutput.Length()),
-                              mVideo.mWaitingForData,
-                              mVideo.mLastStreamSourceID);
+    result += nsPrintfCString(
+      "video state: ni=%d no=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu "
+      "out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
+      NeedInput(mVideo), mVideo.HasPromise(), mVideo.mDemuxRequest.Exists(),
+      int(mVideo.mQueuedSamples.Length()),
+      mVideo.mTimeThreshold ? mVideo.mTimeThreshold.ref().Time().ToSeconds()
+                            : -1.0,
+      mVideo.mTimeThreshold ? mVideo.mTimeThreshold.ref().mHasSeeked : -1,
+      mVideo.mNumSamplesInput, mVideo.mNumSamplesOutput,
+      unsigned(size_t(mVideo.mSizeOfQueue)), unsigned(mVideo.mOutput.Length()),
+      mVideo.mWaitingForData, mVideo.mLastStreamSourceID);
   }
   aString += result;
 }
 
 void
 MediaFormatReader::SetVideoBlankDecode(bool aIsBlankDecode)
 {
   MOZ_ASSERT(OnTaskQueue());
@@ -2803,18 +2834,18 @@ MediaFormatReader::SetBlankDecode(TrackT
   LOG("%s, decoder.mIsBlankDecode = %d => aIsBlankDecode = %d",
       TrackTypeToStr(aTrack), decoder.mIsBlankDecode, aIsBlankDecode);
 
   decoder.mIsBlankDecode = aIsBlankDecode;
   ShutdownDecoder(aTrack);
 }
 
 void
-MediaFormatReader::OnFirstDemuxCompleted(TrackInfo::TrackType aType,
-                                         RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
+MediaFormatReader::OnFirstDemuxCompleted(
+  TrackInfo::TrackType aType, RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
 {
   MOZ_ASSERT(OnTaskQueue());
 
   if (mShutdown) {
     return;
   }
 
   auto& decoder = GetDecoderData(aType);
--- a/dom/media/MediaFormatReader.h
+++ b/dom/media/MediaFormatReader.h
@@ -104,23 +104,25 @@ private:
   void RequestDemuxSamples(TrackType aTrack);
   // Handle demuxed samples according to the input behavior.
   void HandleDemuxedSamples(TrackType aTrack,
                             AbstractMediaDecoder::AutoNotifyDecoded& aA);
   // Decode any pending already demuxed samples.
   void DecodeDemuxedSamples(TrackType aTrack,
                             MediaRawData* aSample);
 
-  struct InternalSeekTarget {
+  struct InternalSeekTarget
+  {
     InternalSeekTarget(const media::TimeInterval& aTime, bool aDropTarget)
       : mTime(aTime)
       , mDropTarget(aDropTarget)
       , mWaiting(false)
       , mHasSeeked(false)
-    {}
+    {
+    }
 
     media::TimeUnit Time() const { return mTime.mStart; }
     media::TimeUnit EndTime() const { return mTime.mEnd; }
     bool Contains(const media::TimeUnit& aTime) const
     {
       return mTime.Contains(aTime);
     }
 
@@ -155,17 +157,18 @@ private:
   bool ShouldSkip(bool aSkipToNextKeyframe, media::TimeUnit aTimeThreshold);
 
   void SetVideoDecodeThreshold();
 
   size_t SizeOfQueue(TrackType aTrack);
 
   RefPtr<PDMFactory> mPlatform;
 
-  struct DecoderData {
+  struct DecoderData
+  {
     DecoderData(MediaFormatReader* aOwner,
                 MediaData::Type aType,
                 uint32_t aNumOfMaxError)
       : mOwner(aOwner)
       , mType(aType)
       , mMutex("DecoderData")
       , mDescription("shutdown")
       , mUpdateScheduled(false)
@@ -182,17 +185,18 @@ private:
       , mNumSamplesInput(0)
       , mNumSamplesOutput(0)
       , mNumSamplesOutputTotal(0)
       , mNumSamplesSkippedTotal(0)
       , mSizeOfQueue(0)
       , mIsHardwareAccelerated(false)
       , mLastStreamSourceID(UINT32_MAX)
       , mIsBlankDecode(false)
-    {}
+    {
+    }
 
     MediaFormatReader* mOwner;
     // Disambiguate Audio vs Video.
     MediaData::Type mType;
     RefPtr<MediaTrackDemuxer> mTrackDemuxer;
     // TaskQueue on which decoder can choose to decode.
     // Only non-null up until the decoder is created.
     RefPtr<TaskQueue> mTaskQueue;
@@ -203,18 +207,18 @@ private:
     RefPtr<MediaDataDecoder> mDecoder;
     const char* mDescription;
     void ShutdownDecoder()
     {
       MutexAutoLock lock(mMutex);
       if (mDecoder) {
         RefPtr<MediaFormatReader> owner = mOwner;
         TrackType type = mType == MediaData::AUDIO_DATA
-                           ? TrackType::kAudioTrack
-                           : TrackType::kVideoTrack;
+                         ? TrackType::kAudioTrack
+                         : TrackType::kVideoTrack;
         mDecoder->Shutdown()
           ->Then(mOwner->OwnerThread(), __func__,
                  [owner, this, type]() {
                    mShutdownRequest.Complete();
                    mShutdownPromise.ResolveIfExists(true, __func__);
                    owner->ScheduleUpdate(type);
                  },
                  []() { MOZ_RELEASE_ASSERT(false, "Can't ever be here"); })
@@ -338,18 +342,18 @@ private:
       mNumSamplesInput = 0;
       mNumSamplesOutput = 0;
       mSizeOfQueue = 0;
       mDraining = false;
       mDrainComplete = false;
       if (mDecoder && !mFlushed) {
         RefPtr<MediaFormatReader> owner = mOwner;
         TrackType type = mType == MediaData::AUDIO_DATA
-                           ? TrackType::kAudioTrack
-                           : TrackType::kVideoTrack;
+                         ? TrackType::kAudioTrack
+                         : TrackType::kVideoTrack;
         mDecoder->Flush()
           ->Then(mOwner->OwnerThread(), __func__,
                  [owner, type, this]() {
                    mFlushRequest.Complete();
                    if (!mShutdownPromise.IsEmpty()) {
                      ShutdownDecoder();
                      return;
                    }
@@ -414,25 +418,26 @@ private:
     UniquePtr<TrackInfo> mOriginalInfo;
     RefPtr<SharedTrackInfo> mInfo;
     Maybe<media::TimeUnit> mFirstDemuxedSampleTime;
     // Use BlankDecoderModule or not.
     bool mIsBlankDecode;
 
   };
 
-  class DecoderDataWithPromise : public DecoderData {
+  class DecoderDataWithPromise : public DecoderData
+  {
   public:
     DecoderDataWithPromise(MediaFormatReader* aOwner,
                            MediaData::Type aType,
                            uint32_t aNumOfMaxError)
       : DecoderData(aOwner, aType, aNumOfMaxError)
       , mHasPromise(false)
-
-    {}
+    {
+    }
 
     bool HasPromise() const override
     {
       return mHasPromise;
     }
 
     RefPtr<MediaDataPromise> EnsurePromise(const char* aMethodName) override
     {
--- a/dom/media/MediaInfo.h
+++ b/dom/media/MediaInfo.h
@@ -18,33 +18,37 @@
 #include "TimeUnits.h"
 
 namespace mozilla {
 
 class AudioInfo;
 class VideoInfo;
 class TextInfo;
 
-class MetadataTag {
+class MetadataTag
+{
 public:
   MetadataTag(const nsACString& aKey,
               const nsACString& aValue)
     : mKey(aKey)
     , mValue(aValue)
-  {}
+  {
+  }
   nsCString mKey;
   nsCString mValue;
 };
 
   // Maximum channel number we can currently handle (7.1)
 #define MAX_AUDIO_CHANNELS 8
 
-class TrackInfo {
+class TrackInfo
+{
 public:
-  enum TrackType {
+  enum TrackType
+  {
     kUndefinedTrack,
     kAudioTrack,
     kVideoTrack,
     kTextTrack
   };
   TrackInfo(TrackType aType,
             const nsAString& aId,
             const nsAString& aKind,
@@ -170,19 +174,21 @@ protected:
     MOZ_COUNT_CTOR(TrackInfo);
   }
 
 private:
   TrackType mType;
 };
 
 // Stores info relevant to presenting media frames.
-class VideoInfo : public TrackInfo {
+class VideoInfo : public TrackInfo
+{
 public:
-  enum Rotation {
+  enum Rotation
+  {
     kDegree_0 = 0,
     kDegree_90 = 90,
     kDegree_180 = 180,
     kDegree_270 = 270,
   };
   VideoInfo()
     : VideoInfo(-1, -1)
   {
@@ -267,18 +273,19 @@ public:
   // If aWidth and aHeight are identical to the original mImage.width/mImage.height
   // then the scaling ratio will be 1.
   // This is used for when the frame size is different from what the container
   // reports. This is legal in WebM, and we will preserve the ratio of the crop
   // rectangle as it was reported relative to the picture size reported by the
   // container.
   nsIntRect ScaledImageRect(int64_t aWidth, int64_t aHeight) const
   {
-    if ((aWidth == mImage.width && aHeight == mImage.height) ||
-        !mImage.width || !mImage.height) {
+    if ((aWidth == mImage.width && aHeight == mImage.height)
+        || !mImage.width
+        || !mImage.height) {
       return ImageRect();
     }
     nsIntRect imageRect = ImageRect();
     imageRect.x = (imageRect.x * aWidth) / mImage.width;
     imageRect.y = (imageRect.y * aHeight) / mImage.height;
     imageRect.width = (aWidth * imageRect.width) / mImage.width;
     imageRect.height = (aHeight * imageRect.height) / mImage.height;
     return imageRect;
@@ -320,17 +327,18 @@ private:
   // mImage may be cropped; currently only used with the WebM container.
   // A negative width or height indicates that no cropping is to occur.
   nsIntRect mImageRect;
 
   // Indicates whether or not frames may contain alpha information.
   bool mAlphaPresent = false;
 };
 
-class AudioInfo : public TrackInfo {
+class AudioInfo : public TrackInfo
+{
 public:
   AudioInfo()
     : TrackInfo(kAudioTrack, NS_LITERAL_STRING("1"), NS_LITERAL_STRING("main"),
                 EmptyString(), EmptyString(), true, 1)
     , mRate(0)
     , mChannels(0)
     , mBitDepth(0)
     , mProfile(0)
@@ -387,27 +395,28 @@ public:
   // Codec profile.
   int8_t mProfile;
 
   // Extended codec profile.
   int8_t mExtendedProfile;
 
   RefPtr<MediaByteBuffer> mCodecSpecificConfig;
   RefPtr<MediaByteBuffer> mExtraData;
-
 };
 
-class EncryptionInfo {
+class EncryptionInfo
+{
 public:
   EncryptionInfo()
     : mEncrypted(false)
   {
   }
 
-  struct InitData {
+  struct InitData
+  {
     template<typename AInitDatas>
     InitData(const nsAString& aType, AInitDatas&& aInitData)
       : mType(aType)
       , mInitData(Forward<AInitDatas>(aInitData))
     {
     }
 
     // Encryption type to be passed to JS. Usually `cenc'.
@@ -444,17 +453,18 @@ public:
   }
 
   // One 'InitData' per encrypted buffer.
   InitDatas mInitDatas;
 private:
   bool mEncrypted;
 };
 
-class MediaInfo {
+class MediaInfo
+{
 public:
   bool HasVideo() const
   {
     return mVideo.IsValid();
   }
 
   void EnableVideo()
   {
@@ -479,33 +489,34 @@ public:
     // Set dummy values so that HasAudio() will return true;
     // See AudioInfo::IsValid()
     mAudio.mChannels = 2;
     mAudio.mRate = 44100;
   }
 
   bool IsEncrypted() const
   {
-    return (HasAudio() && mAudio.mCrypto.mValid) ||
-           (HasVideo() && mVideo.mCrypto.mValid);
+    return (HasAudio() && mAudio.mCrypto.mValid)
+           || (HasVideo() && mVideo.mCrypto.mValid);
   }
 
   bool HasValidMedia() const
   {
     return HasVideo() || HasAudio();
   }
 
   void AssertValid() const
   {
     NS_ASSERTION(!HasAudio() || mAudio.mTrackId != TRACK_INVALID,
                  "Audio track ID must be valid");
     NS_ASSERTION(!HasVideo() || mVideo.mTrackId != TRACK_INVALID,
                  "Audio track ID must be valid");
-    NS_ASSERTION(!HasAudio() || !HasVideo() ||
-                 mAudio.mTrackId != mVideo.mTrackId,
+    NS_ASSERTION(!HasAudio()
+                 || !HasVideo()
+                 || mAudio.mTrackId != mVideo.mTrackId,
                  "Duplicate track IDs");
   }
 
   // TODO: Store VideoInfo and AudioInfo in arrays to support multiple tracks.
   VideoInfo mVideo;
   AudioInfo mAudio;
 
   // If the metadata includes a duration, we store it here.
@@ -524,17 +535,18 @@ public:
 
   EncryptionInfo mCrypto;
 
   // The minimum of start times of audio and video tracks.
   // Used to map the zero time on the media timeline to the first frame.
   media::TimeUnit mStartTime;
 };
 
-class SharedTrackInfo {
+class SharedTrackInfo
+{
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SharedTrackInfo)
 public:
   SharedTrackInfo(const TrackInfo& aOriginal, uint32_t aStreamID)
     : mInfo(aOriginal.Clone())
     , mStreamSourceID(aStreamID)
     , mMimeType(mInfo->mMimeType)
   {
   }
@@ -566,50 +578,51 @@ public:
   }
 
   const TextInfo* GetAsTextInfo() const
   {
     return mInfo ? mInfo->GetAsTextInfo() : nullptr;
   }
 
 private:
-  ~SharedTrackInfo() {};
+  ~SharedTrackInfo() { }
   UniquePtr<TrackInfo> mInfo;
   // A unique ID, guaranteed to change when changing streams.
   uint32_t mStreamSourceID;
 
 public:
   const nsCString& mMimeType;
 };
 
-class AudioConfig {
+class AudioConfig
+{
 public:
-  enum Channel {
+  enum Channel
+  {
     CHANNEL_INVALID = -1,
     CHANNEL_MONO = 0,
     CHANNEL_LEFT,
     CHANNEL_RIGHT,
     CHANNEL_CENTER,
     CHANNEL_LS,
     CHANNEL_RS,
     CHANNEL_RLS,
     CHANNEL_RCENTER,
     CHANNEL_RRS,
     CHANNEL_LFE,
   };
 
-  class ChannelLayout {
+  class ChannelLayout
+  {
   public:
-    ChannelLayout()
-      : mChannelMap(0)
-      , mValid(false)
-    {}
+    ChannelLayout() : mChannelMap(0), mValid(false) { }
     explicit ChannelLayout(uint32_t aChannels)
       : ChannelLayout(aChannels, SMPTEDefault(aChannels))
-    {}
+    {
+    }
     ChannelLayout(uint32_t aChannels, const Channel* aConfig)
       : ChannelLayout()
     {
       if (!aConfig) {
         mValid = false;
         return;
       }
       mChannels.AppendElements(aConfig, aChannels);
@@ -640,32 +653,31 @@ public:
     // out[channel] = in[map[channel]].
     // Returns true if the reordering is possible or false otherwise.
     // If true, then aMap, if set, will be updated to contain the mapping table
     // allowing conversion from the current layout to aOther.
     // If aMap is nullptr, then MappingTable can be used to simply determine if
     // the current layout can be easily reordered to aOther.
     // aMap must be an array of size MAX_AUDIO_CHANNELS.
     bool MappingTable(const ChannelLayout& aOther, uint8_t* aMap = nullptr) const;
-    bool IsValid() const {
-      return mValid;
-    }
+    bool IsValid() const { return mValid; }
     bool HasChannel(Channel aChannel) const
     {
       return mChannelMap & (1 << aChannel);
     }
   private:
     void UpdateChannelMap();
     const Channel* SMPTEDefault(uint32_t aChannels) const;
     AutoTArray<Channel, MAX_AUDIO_CHANNELS> mChannels;
     uint32_t mChannelMap;
     bool mValid;
   };
 
-  enum SampleFormat {
+  enum SampleFormat
+  {
     FORMAT_NONE = 0,
     FORMAT_U8,
     FORMAT_S16,
     FORMAT_S24LSB,
     FORMAT_S24,
     FORMAT_S32,
     FORMAT_FLT,
 #if defined(MOZ_SAMPLE_TYPE_FLOAT32)
@@ -705,19 +717,20 @@ public:
     return mFormat;
   }
   bool Interleaved() const
   {
     return mInterleaved;
   }
   bool operator==(const AudioConfig& aOther) const
   {
-    return mChannelLayout == aOther.mChannelLayout &&
-      mRate == aOther.mRate && mFormat == aOther.mFormat &&
-      mInterleaved == aOther.mInterleaved;
+    return mChannelLayout == aOther.mChannelLayout
+      && mRate == aOther.mRate
+      && mFormat == aOther.mFormat
+      && mInterleaved == aOther.mInterleaved;
   }
   bool operator!=(const AudioConfig& aOther) const
   {
     return !(*this == aOther);
   }
 
   bool IsValid() const
   {
--- a/dom/media/MediaRecorder.cpp
+++ b/dom/media/MediaRecorder.cpp
@@ -700,18 +700,18 @@ private:
     }
     return PrincipalSubsumes(principal);
   }
 
   bool AudioNodePrincipalSubsumes()
   {
     MOZ_ASSERT(mRecorder->mAudioNode != nullptr);
     nsIDocument* doc = mRecorder->mAudioNode->GetOwner()
-                     ? mRecorder->mAudioNode->GetOwner()->GetExtantDoc()
-                     : nullptr;
+                       ? mRecorder->mAudioNode->GetOwner()->GetExtantDoc()
+                       : nullptr;
     nsCOMPtr<nsIPrincipal> principal = doc ? doc->NodePrincipal() : nullptr;
     return PrincipalSubsumes(principal);
   }
 
   void InitEncoder(uint8_t aTrackTypes, TrackRate aTrackRate)
   {
     LOG(LogLevel::Debug, ("Session.InitEncoder %p", this));
     MOZ_ASSERT(NS_IsMainThread());
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -202,18 +202,20 @@ MediaStreamGraphImpl::ExtractPendingInpu
     bool shouldNotifyTrackCreated = false;
     for (int32_t i = aStream->mUpdateTracks.Length() - 1; i >= 0; --i) {
       SourceMediaStream::TrackData* data = &aStream->mUpdateTracks[i];
       aStream->ApplyTrackDisabling(data->mID, data->mData);
       // This deals with the NotifyQueuedTrackChanges and
       // NotifyQueuedAudioData parts.
 
       // The logic here differs from that for manipulating aStream->mTracks,
       // so the two are not combined.
-      StreamTime offset = (data->mCommands & SourceMediaStream::TRACK_CREATE)
-          ? data->mStart : aStream->mTracks.FindTrack(data->mID)->GetSegment()->GetDuration();
+      StreamTime offset =
+        (data->mCommands & SourceMediaStream::TRACK_CREATE)
+        ? data->mStart
+        : aStream->mTracks.FindTrack(data->mID)->GetSegment()->GetDuration();
 
       // Audio case.
       if (data->mData->GetType() == MediaSegment::AUDIO) {
         if (data->mCommands) {
           MOZ_ASSERT(!(data->mCommands & SourceMediaStream::TRACK_UNUSED));
           for (MediaStreamListener* l : aStream->mListeners) {
             if (data->mCommands & SourceMediaStream::TRACK_END) {
               l->NotifyQueuedAudioData(this, data->mID,
@@ -390,23 +392,24 @@ MediaStreamGraphImpl::ProcessChunkMetada
     }
     offset += chunk->GetDuration();
     if (chunk->IsNull() || offset < aStart) {
       continue;
     }
     PrincipalHandle principalHandle = chunk->GetPrincipalHandle();
     if (principalHandle != aSegment.GetLastPrincipalHandle()) {
       aSegment.SetLastPrincipalHandle(principalHandle);
-      STREAM_LOG(LogLevel::Debug, ("MediaStream %p track %d, principalHandle "
-                                   "changed in %sChunk with duration %lld",
-                                   aStream, aTrackID,
-                                   aSegment.GetType() == MediaSegment::AUDIO
-                                       ? "Audio" : "Video",
-                                   (long long) chunk->GetDuration()));
-      for (const TrackBound<MediaStreamTrackListener>& listener : aStream->mTrackListeners) {
+      STREAM_LOG(LogLevel::Debug,
+                 ("MediaStream %p track %d, principalHandle "
+                  "changed in %sChunk with duration %lld",
+                  aStream, aTrackID,
+                  aSegment.GetType() == MediaSegment::AUDIO ? "Audio" : "Video",
+                  (long long)chunk->GetDuration()));
+      for (const TrackBound<MediaStreamTrackListener>& listener :
+           aStream->mTrackListeners) {
         if (listener.mTrackID == aTrackID) {
           listener.mListener->NotifyPrincipalHandleChanged(this, principalHandle);
         }
       }
     }
   }
 }
 
--- a/dom/media/MediaStreamListener.cpp
+++ b/dom/media/MediaStreamListener.cpp
@@ -50,18 +50,18 @@ DirectMediaStreamTrackListener::NotifyRe
                                                                              MediaSegment& aMedia)
 {
   if (mDisabledFreezeCount == 0 && mDisabledBlackCount == 0) {
     NotifyRealtimeTrackData(aGraph, aTrackOffset, aMedia);
     return;
   }
 
   DisabledTrackMode mode = mDisabledBlackCount > 0
-                         ? DisabledTrackMode::SILENCE_BLACK
-                         : DisabledTrackMode::SILENCE_FREEZE;
+                           ? DisabledTrackMode::SILENCE_BLACK
+                           : DisabledTrackMode::SILENCE_FREEZE;
   if (!mMedia) {
     mMedia = aMedia.CreateEmptyClone();
   }
   if (aMedia.GetType() == MediaSegment::AUDIO) {
     MirrorAndDisableSegment(static_cast<AudioSegment&>(aMedia),
                             static_cast<AudioSegment&>(*mMedia));
   } else if (aMedia.GetType() == MediaSegment::VIDEO) {
     MirrorAndDisableSegment(static_cast<VideoSegment&>(aMedia),
--- a/dom/media/flac/FlacDemuxer.cpp
+++ b/dom/media/flac/FlacDemuxer.cpp
@@ -119,19 +119,18 @@ public:
     } else {
       mBlocksize = FlacBlocksizeTable[bs_code];
     }
 
     // The sample index is either:
     // 1- coded sample number if blocksize is variable or
     // 2- coded frame number if blocksize is known.
     // A frame is made of Blocksize samples.
-    mIndex = mVariableBlockSize
-      ? frame_or_sample_num
-      : frame_or_sample_num * mBlocksize;
+    mIndex = mVariableBlockSize ? frame_or_sample_num
+                                : frame_or_sample_num * mBlocksize;
 
     // Sample rate.
     if (sr_code < 12) {
       mInfo.mRate = FlacSampleRateTable[sr_code];
     } else if (sr_code == 12) {
       mInfo.mRate = br.ReadBits(8) * 1000;
     } else if (sr_code == 13) {
       mInfo.mRate = br.ReadBits(16);
--- a/dom/media/mediasink/DecodedAudioDataSink.cpp
+++ b/dom/media/mediasink/DecodedAudioDataSink.cpp
@@ -66,18 +66,20 @@ DecodedAudioDataSink::DecodedAudioDataSi
   } else {
     // We will resample all data to match cubeb's preferred sampling rate.
     mOutputRate = AudioStream::GetPreferredRate();
   }
   MOZ_DIAGNOSTIC_ASSERT(mOutputRate, "output rate can't be 0.");
 
   bool monoAudioEnabled = MediaPrefs::MonoAudio();
 
-  mOutputChannels = monoAudioEnabled
-    ? 1 : (MediaPrefs::AudioSinkForceStereo() ? 2 : mInfo.mChannels);
+  mOutputChannels =
+    monoAudioEnabled
+    ? 1
+    : (MediaPrefs::AudioSinkForceStereo() ? 2 : mInfo.mChannels);
 }
 
 DecodedAudioDataSink::~DecodedAudioDataSink()
 {
 }
 
 RefPtr<GenericPromise>
 DecodedAudioDataSink::Init(const PlaybackParams& aParams)
--- a/dom/media/mediasource/ContainerParser.cpp
+++ b/dom/media/mediasource/ContainerParser.cpp
@@ -305,17 +305,18 @@ public:
     mLastMapping = Some(mapping[completeIdx]);
 
     if (!previousMapping && completeIdx + 1u >= mapping.Length()) {
       // We have neither a previous nor a next block available,
       // so we can't estimate this block's duration.
       return NS_ERROR_NOT_AVAILABLE;
     }
 
-    uint64_t frameDuration = (completeIdx + 1u < mapping.Length())
+    uint64_t frameDuration =
+      (completeIdx + 1u < mapping.Length())
       ? mapping[completeIdx + 1].mTimecode - mapping[completeIdx].mTimecode
       : mapping[completeIdx].mTimecode - previousMapping.ref().mTimecode;
     aStart = mapping[0].mTimecode / NS_PER_USEC;
     aEnd = (mapping[completeIdx].mTimecode + frameDuration) / NS_PER_USEC;
 
     MSE_DEBUG(WebMContainerParser, "[%lld, %lld] [fso=%lld, leo=%lld, l=%u processedIdx=%u fs=%lld]",
               aStart, aEnd, mapping[0].mSyncOffset,
               mapping[completeIdx].mEndOffset, mapping.Length(), completeIdx,
--- a/dom/media/mediasource/MediaSourceDecoder.cpp
+++ b/dom/media/mediasource/MediaSourceDecoder.cpp
@@ -279,18 +279,18 @@ MediaSourceDecoder::NextFrameBufferedSta
   TimeUnit currentPosition = TimeUnit::FromMicroseconds(CurrentPosition());
   TimeIntervals buffered = GetBuffered();
   buffered.SetFuzz(MediaSourceDemuxer::EOS_FUZZ / 2);
   TimeInterval interval(
     currentPosition,
     currentPosition
     + media::TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
   return buffered.ContainsStrict(ClampIntervalToEnd(interval))
-    ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
-    : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
+         ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
+         : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
 }
 
 bool
 MediaSourceDecoder::CanPlayThrough()
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   if (NextFrameBufferedStatus() == MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE) {
--- a/dom/media/mediasource/MediaSourceDemuxer.cpp
+++ b/dom/media/mediasource/MediaSourceDemuxer.cpp
@@ -286,20 +286,19 @@ MediaSourceDemuxer::GetMozDebugReaderDat
 MediaSourceTrackDemuxer::MediaSourceTrackDemuxer(MediaSourceDemuxer* aParent,
                                                  TrackInfo::TrackType aType,
                                                  TrackBuffersManager* aManager)
   : mParent(aParent)
   , mManager(aManager)
   , mType(aType)
   , mMonitor("MediaSourceTrackDemuxer")
   , mReset(true)
-  , mPreRoll(
-      TimeUnit::FromMicroseconds(
-        OpusDataDecoder::IsOpus(mParent->GetTrackInfo(mType)->mMimeType)
-          ? 80000 : 0))
+  , mPreRoll(TimeUnit::FromMicroseconds(
+      OpusDataDecoder::IsOpus(mParent->GetTrackInfo(mType)->mMimeType) ? 80000
+                                                                       : 0))
 {
 }
 
 UniquePtr<TrackInfo>
 MediaSourceTrackDemuxer::GetInfo() const
 {
   return mParent->GetTrackInfo(mType)->Clone();
 }
--- a/dom/media/mediasource/TrackBuffersManager.cpp
+++ b/dom/media/mediasource/TrackBuffersManager.cpp
@@ -1314,23 +1314,24 @@ TrackBuffersManager::CompleteCodedFrameP
     return;
   }
 
   mLastParsedEndTime = Some(std::max(mAudioTracks.mLastParsedEndTime,
                                      mVideoTracks.mLastParsedEndTime));
 
   // 6. Remove the media segment bytes from the beginning of the input buffer.
   // Clear our demuxer from any already processed data.
-  int64_t safeToEvict = std::min(
-    HasVideo()
-      ? mVideoTracks.mDemuxer->GetEvictionOffset(mVideoTracks.mLastParsedEndTime)
-      : INT64_MAX,
-    HasAudio()
-      ? mAudioTracks.mDemuxer->GetEvictionOffset(mAudioTracks.mLastParsedEndTime)
-      : INT64_MAX);
+  int64_t safeToEvict = std::min(HasVideo()
+                                 ? mVideoTracks.mDemuxer->GetEvictionOffset(
+                                     mVideoTracks.mLastParsedEndTime)
+                                 : INT64_MAX,
+                                 HasAudio()
+                                 ? mAudioTracks.mDemuxer->GetEvictionOffset(
+                                     mAudioTracks.mLastParsedEndTime)
+                                 : INT64_MAX);
   ErrorResult rv;
   mCurrentInputBuffer->EvictBefore(safeToEvict, rv);
   if (rv.Failed()) {
     rv.SuppressException();
     RejectProcessing(NS_ERROR_OUT_OF_MEMORY, __func__);
     return;
   }
 
@@ -1393,36 +1394,39 @@ TrackBuffersManager::ProcessFrames(Track
   if (!aSamples.Length()) {
     return;
   }
 
   // 1. If generate timestamps flag equals true
   // Let presentation timestamp equal 0.
   // Otherwise
   // Let presentation timestamp be a double precision floating point representation of the coded frame's presentation timestamp in seconds.
-  TimeUnit presentationTimestamp = mSourceBufferAttributes->mGenerateTimestamps
-    ? TimeUnit() : TimeUnit::FromMicroseconds(aSamples[0]->mTime);
+  TimeUnit presentationTimestamp =
+    mSourceBufferAttributes->mGenerateTimestamps
+    ? TimeUnit()
+    : TimeUnit::FromMicroseconds(aSamples[0]->mTime);
 
   // 3. If mode equals "sequence" and group start timestamp is set, then run the following steps:
   CheckSequenceDiscontinuity(presentationTimestamp);
 
   // 5. Let track buffer equal the track buffer that the coded frame will be added to.
   auto& trackBuffer = aTrackData;
 
   // Some videos do not start exactly at 0, but at a small negative value.
   // To avoid evicting the starting frame of those videos, we allow a leeway
   // of +- mLongestFrameDuration on the append window start.
   // We only apply the leeway with the default append window start of 0;
   // otherwise we do as per the spec.
-  TimeInterval targetWindow = mAppendWindow.mStart != TimeUnit::FromSeconds(0)
+  TimeInterval targetWindow =
+    mAppendWindow.mStart != TimeUnit::FromSeconds(0)
     ? mAppendWindow
     : TimeInterval(mAppendWindow.mStart, mAppendWindow.mEnd,
                    trackBuffer.mLastFrameDuration.isSome()
-                     ? trackBuffer.mLongestFrameDuration
-                     : TimeUnit::FromMicroseconds(aSamples[0]->mDuration));
+                   ? trackBuffer.mLongestFrameDuration
+                   : TimeUnit::FromMicroseconds(aSamples[0]->mDuration));
 
   TimeIntervals samplesRange;
   uint32_t sizeNewSamples = 0;
   TrackBuffer samples; // array that will contain the frames to be added
                        // to our track buffer.
 
   // We assume that no frames are contiguous within a media segment and as such
   // don't need to check for discontinuity except for the first frame and should
@@ -1479,23 +1483,22 @@ TrackBuffersManager::ProcessFrames(Track
 
     TimeUnit sampleTime = TimeUnit::FromMicroseconds(sample->mTime);
     TimeUnit sampleTimecode = TimeUnit::FromMicroseconds(sample->mTimecode);
     TimeUnit sampleDuration = TimeUnit::FromMicroseconds(sample->mDuration);
     TimeUnit timestampOffset = mSourceBufferAttributes->GetTimestampOffset();
 
     TimeInterval sampleInterval =
       mSourceBufferAttributes->mGenerateTimestamps
-        ? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
-        : TimeInterval(timestampOffset + sampleTime,
-                       timestampOffset + sampleTime + sampleDuration);
-    TimeUnit decodeTimestamp =
-      mSourceBufferAttributes->mGenerateTimestamps
-        ? timestampOffset
-        : timestampOffset + sampleTimecode;
+      ? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
+      : TimeInterval(timestampOffset + sampleTime,
+                     timestampOffset + sampleTime + sampleDuration);
+    TimeUnit decodeTimestamp = mSourceBufferAttributes->mGenerateTimestamps
+                               ? timestampOffset
+                               : timestampOffset + sampleTimecode;
 
     // 6. If last decode timestamp for track buffer is set and decode timestamp is less than last decode timestamp:
     // OR
     // If last decode timestamp for track buffer is set and the difference between decode timestamp and last decode timestamp is greater than 2 times last frame duration:
 
     if (needDiscontinuityCheck && trackBuffer.mLastDecodeTimestamp.isSome() &&
         (decodeTimestamp < trackBuffer.mLastDecodeTimestamp.ref() ||
          (decodeTimestamp - trackBuffer.mLastDecodeTimestamp.ref()
@@ -1520,36 +1523,35 @@ TrackBuffersManager::ProcessFrames(Track
         // 4. Unset the highest end timestamp on all track buffers.
         // 5. Set the need random access point flag on all track buffers to true.
         track->ResetAppendState();
       }
       // 6. Jump to the Loop Top step above to restart processing of the current coded frame.
       // Rather than restarting the process for the frame, we run the first
       // steps again instead.
       // 3. If mode equals "sequence" and group start timestamp is set, then run the following steps:
-      TimeUnit presentationTimestamp = mSourceBufferAttributes->mGenerateTimestamps
-        ? TimeUnit() : sampleTime;
+      TimeUnit presentationTimestamp =
+        mSourceBufferAttributes->mGenerateTimestamps ? TimeUnit() : sampleTime;
       CheckSequenceDiscontinuity(presentationTimestamp);
 
       if (!sample->mKeyframe) {
         continue;
       }
       if (appendMode == SourceBufferAppendMode::Sequence) {
         // mSourceBufferAttributes->GetTimestampOffset() was modified during CheckSequenceDiscontinuity.
         // We need to update our variables.
         timestampOffset = mSourceBufferAttributes->GetTimestampOffset();
         sampleInterval =
           mSourceBufferAttributes->mGenerateTimestamps
-            ? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
-            : TimeInterval(timestampOffset + sampleTime,
-                           timestampOffset + sampleTime + sampleDuration);
-        decodeTimestamp =
-          mSourceBufferAttributes->mGenerateTimestamps
-            ? timestampOffset
-            : timestampOffset + sampleTimecode;
+          ? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
+          : TimeInterval(timestampOffset + sampleTime,
+                         timestampOffset + sampleTime + sampleDuration);
+        decodeTimestamp = mSourceBufferAttributes->mGenerateTimestamps
+                          ? timestampOffset
+                          : timestampOffset + sampleTimecode;
       }
       trackBuffer.mNeedRandomAccessPoint = false;
       needDiscontinuityCheck = false;
     }
 
     // 7. Let frame end timestamp equal the sum of presentation timestamp and frame duration.
     // This is sampleInterval.mEnd
 
--- a/dom/media/ogg/OggCodecState.cpp
+++ b/dom/media/ogg/OggCodecState.cpp
@@ -358,18 +358,18 @@ TheoraState::Init()
 {
   if (!mActive) {
     return false;
   }
 
   int64_t n = mTheoraInfo.aspect_numerator;
   int64_t d = mTheoraInfo.aspect_denominator;
 
-  float aspectRatio = (n == 0 || d == 0)
-    ? 1.0f : static_cast<float>(n) / static_cast<float>(d);
+  float aspectRatio =
+    (n == 0 || d == 0) ? 1.0f : static_cast<float>(n) / static_cast<float>(d);
 
   // Ensure the frame and picture regions aren't larger than our prescribed
   // maximum, or zero sized.
   nsIntSize frame(mTheoraInfo.frame_width, mTheoraInfo.frame_height);
   nsIntRect picture(mTheoraInfo.pic_x, mTheoraInfo.pic_y, mTheoraInfo.pic_width, mTheoraInfo.pic_height);
   nsIntSize display(mTheoraInfo.pic_width, mTheoraInfo.pic_height);
   ScaleDisplayByAspectRatio(display, aspectRatio);
   if (!IsValidVideoRegion(frame, picture, display)) {
--- a/dom/media/webaudio/AudioBuffer.cpp
+++ b/dom/media/webaudio/AudioBuffer.cpp
@@ -390,20 +390,20 @@ AudioBuffer::StealJSArrayDataIntoSharedC
     bool isSharedMemory;
     JS::Rooted<JSObject*> arrayBuffer(aJSContext,
                                       JS_GetArrayBufferViewBuffer(aJSContext,
                                                                   arrayBufferView,
                                                                   &isSharedMemory));
     // The channel data arrays should all have originated in
     // RestoreJSChannelData, where they are created unshared.
     MOZ_ASSERT(!isSharedMemory);
-    auto stolenData = arrayBuffer
-      ? static_cast<float*>(JS_StealArrayBufferContents(aJSContext,
-                                                        arrayBuffer))
-      : nullptr;
+    auto stolenData =
+      arrayBuffer ? static_cast<float*>(
+                      JS_StealArrayBufferContents(aJSContext, arrayBuffer))
+                  : nullptr;
     if (stolenData) {
       result->SetData(i, stolenData, js_free, stolenData);
     } else {
       NS_ASSERTION(i == 0, "some channels lost when contents not acquired");
       return nullptr;
     }
   }