Bug 1396515: Fix incorrect && and || operator placements. r?Sylvestre draft
author: Jean-Yves Avenard <jyavenard@mozilla.com>
date: Mon, 04 Sep 2017 11:27:43 +0200
changeset 658499 1a1e777d7bb648a2a9607f26869438a0fca8e261
parent 658498 26337226cd7b2ba062ef586cac1364a76b3eb81f
child 729683 b9920bf5c2c93855d1c662c91a935cf7073e56ba
push id: 77800
push user: bmo:jyavenard@mozilla.com
push date: Mon, 04 Sep 2017 09:45:01 +0000
reviewers: Sylvestre
bugs: 1396515
milestone: 57.0a1
Bug 1396515: Fix incorrect && and || operator placements. r?Sylvestre Includes some side 80 columns fix. MozReview-Commit-ID: IXRoQfM0Hjc
dom/media/ADTSDecoder.cpp
dom/media/Benchmark.cpp
dom/media/CubebUtils.cpp
dom/media/DecoderTraits.cpp
dom/media/Intervals.h
dom/media/MediaData.cpp
dom/media/MediaDecoder.cpp
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaFormatReader.cpp
dom/media/MediaFormatReader.h
dom/media/MediaInfo.h
dom/media/VideoUtils.h
dom/media/encoder/MediaEncoder.cpp
dom/media/flac/FlacDecoder.cpp
dom/media/flac/FlacDemuxer.cpp
dom/media/fmp4/MP4Decoder.cpp
dom/media/fmp4/MP4Demuxer.cpp
dom/media/gtest/TestVPXDecoding.cpp
dom/media/mediasource/MediaSourceDecoder.cpp
dom/media/mediasource/TrackBuffersManager.cpp
dom/media/mp3/MP3Decoder.cpp
dom/media/mp3/MP3Demuxer.cpp
dom/media/mp3/MP3FrameParser.cpp
dom/media/ogg/OggCodecState.cpp
dom/media/platforms/PDMFactory.cpp
dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
dom/media/platforms/agnostic/BlankDecoderModule.cpp
dom/media/platforms/agnostic/VPXDecoder.cpp
dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp
dom/media/platforms/android/AndroidDecoderModule.cpp
dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
dom/media/platforms/wmf/DXVA2Manager.cpp
dom/media/platforms/wmf/WMFDecoderModule.cpp
dom/media/platforms/wmf/WMFVideoMFTManager.cpp
dom/media/platforms/wrappers/H264Converter.cpp
dom/media/systemservices/CamerasParent.cpp
dom/media/systemservices/CamerasParent.h
dom/media/systemservices/VideoFrameUtils.cpp
dom/media/wave/WaveDecoder.cpp
dom/media/wave/WaveDemuxer.cpp
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webaudio/blink/Reverb.cpp
dom/media/webaudio/blink/ReverbConvolver.cpp
dom/media/webm/WebMDemuxer.cpp
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webspeech/recognition/SpeechRecognition.cpp
--- a/dom/media/ADTSDecoder.cpp
+++ b/dom/media/ADTSDecoder.cpp
@@ -16,21 +16,19 @@ ADTSDecoder::IsEnabled()
   RefPtr<PDMFactory> platform = new PDMFactory();
   return platform->SupportsMimeType(NS_LITERAL_CSTRING("audio/mp4a-latm"),
                                     /* DecoderDoctorDiagnostics* */ nullptr);
 }
 
 /* static */ bool
 ADTSDecoder::IsSupportedType(const MediaContainerType& aContainerType)
 {
-  if (aContainerType.Type() == MEDIAMIMETYPE("audio/aac")
-      || aContainerType.Type() == MEDIAMIMETYPE("audio/aacp")
-      || aContainerType.Type() == MEDIAMIMETYPE("audio/x-aac")) {
-    return
-      IsEnabled()
-      && (aContainerType.ExtendedType().Codecs().IsEmpty()
-          || aContainerType.ExtendedType().Codecs() == "aac");
+  if (aContainerType.Type() == MEDIAMIMETYPE("audio/aac") ||
+      aContainerType.Type() == MEDIAMIMETYPE("audio/aacp") ||
+      aContainerType.Type() == MEDIAMIMETYPE("audio/x-aac")) {
+    return IsEnabled() && (aContainerType.ExtendedType().Codecs().IsEmpty() ||
+                           aContainerType.ExtendedType().Codecs() == "aac");
   }
 
   return false;
 }
 
 } // namespace mozilla
--- a/dom/media/Benchmark.cpp
+++ b/dom/media/Benchmark.cpp
@@ -192,18 +192,18 @@ BenchmarkPlayback::DemuxNextSample()
   MOZ_ASSERT(OnThread());
 
   RefPtr<Benchmark> ref(mMainThreadState);
   RefPtr<MediaTrackDemuxer::SamplesPromise> promise = mTrackDemuxer->GetSamples();
   promise->Then(
     Thread(), __func__,
     [this, ref](RefPtr<MediaTrackDemuxer::SamplesHolder> aHolder) {
       mSamples.AppendElements(Move(aHolder->mSamples));
-      if (ref->mParameters.mStopAtFrame
-          && mSamples.Length() == (size_t)ref->mParameters.mStopAtFrame.ref()) {
+      if (ref->mParameters.mStopAtFrame &&
+          mSamples.Length() == (size_t)ref->mParameters.mStopAtFrame.ref()) {
         InitDecoder(Move(*mTrackDemuxer->GetInfo()));
       } else {
         Dispatch(NS_NewRunnableFunction("BenchmarkPlayback::DemuxNextSample",
                                         [this, ref]() { DemuxNextSample(); }));
       }
     },
     [this, ref](const MediaResult& aError) {
       switch (aError.Code()) {
@@ -286,20 +286,19 @@ BenchmarkPlayback::Output(const MediaDat
   RefPtr<Benchmark> ref(mMainThreadState);
   mFrameCount += aResults.Length();
   if (!mDecodeStartTime && mFrameCount >= ref->mParameters.mStartupFrame) {
     mDecodeStartTime = Some(TimeStamp::Now());
   }
   TimeStamp now = TimeStamp::Now();
   int32_t frames = mFrameCount - ref->mParameters.mStartupFrame;
   TimeDuration elapsedTime = now - mDecodeStartTime.refOr(now);
-  if (!mFinished
-      && (((frames == ref->mParameters.mFramesToMeasure) && frames > 0)
-          || elapsedTime >= ref->mParameters.mTimeout
-          || mDrained)) {
+  if (!mFinished &&
+      (((frames == ref->mParameters.mFramesToMeasure) && frames > 0) ||
+       elapsedTime >= ref->mParameters.mTimeout || mDrained)) {
     uint32_t decodeFps = frames / elapsedTime.ToSeconds();
     MainThreadShutdown();
     ref->Dispatch(
       NS_NewRunnableFunction("BenchmarkPlayback::Output", [ref, decodeFps]() {
         ref->ReturnResult(decodeFps);
       }));
   }
 }
--- a/dom/media/CubebUtils.cpp
+++ b/dom/media/CubebUtils.cpp
@@ -338,18 +338,18 @@ bool InitPreferredChannelLayout()
   sPreferredChannelLayout = layout;
   return true;
 }
 
 uint32_t PreferredChannelMap(uint32_t aChannels)
 {
   // Use SMPTE default channel map if we can't get preferred layout
   // or the channel counts of preferred layout is different from input's one
-  if (!InitPreferredChannelLayout()
-      || kLayoutInfos[sPreferredChannelLayout].channels != aChannels) {
+  if (!InitPreferredChannelLayout() ||
+      kLayoutInfos[sPreferredChannelLayout].channels != aChannels) {
     AudioConfig::ChannelLayout smpteLayout(aChannels);
     return smpteLayout.Map();
   }
 
   return kLayoutInfos[sPreferredChannelLayout].mask;
 }
 
 void InitBrandName()
--- a/dom/media/DecoderTraits.cpp
+++ b/dom/media/DecoderTraits.cpp
@@ -45,21 +45,21 @@ namespace mozilla
 {
 
 /* static */ bool
 DecoderTraits::IsHttpLiveStreamingType(const MediaContainerType& aType)
 {
   const auto& mimeType = aType.Type();
   return // For m3u8.
          // https://tools.ietf.org/html/draft-pantos-http-live-streaming-19#section-10
-         mimeType == MEDIAMIMETYPE("application/vnd.apple.mpegurl")
-         // Some sites serve these as the informal m3u type.
-         || mimeType == MEDIAMIMETYPE("application/x-mpegurl")
-         || mimeType == MEDIAMIMETYPE("audio/mpegurl")
-         || mimeType == MEDIAMIMETYPE("audio/x-mpegurl");
+    mimeType == MEDIAMIMETYPE("application/vnd.apple.mpegurl") ||
+    // Some sites serve these as the informal m3u type.
+    mimeType == MEDIAMIMETYPE("application/x-mpegurl") ||
+    mimeType == MEDIAMIMETYPE("audio/mpegurl") ||
+    mimeType == MEDIAMIMETYPE("audio/x-mpegurl");
 }
 
 /* static */ bool
 DecoderTraits::IsMP4SupportedType(const MediaContainerType& aType,
                                   DecoderDoctorDiagnostics* aDiagnostics)
 {
 #ifdef MOZ_FMP4
   return MP4Decoder::IsSupportedType(aType, aDiagnostics);
--- a/dom/media/Intervals.h
+++ b/dom/media/Intervals.h
@@ -141,47 +141,47 @@ public:
 
   bool ContainsWithStrictEnd(const T& aX) const
   {
     return mStart - mFuzz <= aX && aX < mEnd;
   }
 
   bool Contains(const SelfType& aOther) const
   {
-    return (mStart - mFuzz <= aOther.mStart + aOther.mFuzz)
-           && (aOther.mEnd - aOther.mFuzz <= mEnd + mFuzz);
+    return (mStart - mFuzz <= aOther.mStart + aOther.mFuzz) &&
+           (aOther.mEnd - aOther.mFuzz <= mEnd + mFuzz);
   }
 
   bool ContainsStrict(const SelfType& aOther) const
   {
     return mStart <= aOther.mStart && aOther.mEnd <= mEnd;
   }
 
   bool ContainsWithStrictEnd(const SelfType& aOther) const
   {
-    return (mStart - mFuzz <= aOther.mStart + aOther.mFuzz)
-           && aOther.mEnd <= mEnd;
+    return (mStart - mFuzz <= aOther.mStart + aOther.mFuzz) &&
+           aOther.mEnd <= mEnd;
   }
 
   bool Intersects(const SelfType& aOther) const
   {
-    return (mStart - mFuzz < aOther.mEnd + aOther.mFuzz)
-           && (aOther.mStart - aOther.mFuzz < mEnd + mFuzz);
+    return (mStart - mFuzz < aOther.mEnd + aOther.mFuzz) &&
+           (aOther.mStart - aOther.mFuzz < mEnd + mFuzz);
   }
 
   bool IntersectsStrict(const SelfType& aOther) const
   {
     return mStart < aOther.mEnd && aOther.mStart < mEnd;
   }
 
   // Same as Intersects, but including the boundaries.
   bool Touches(const SelfType& aOther) const
   {
-    return (mStart - mFuzz <= aOther.mEnd + aOther.mFuzz)
-           && (aOther.mStart - aOther.mFuzz <= mEnd + mFuzz);
+    return (mStart - mFuzz <= aOther.mEnd + aOther.mFuzz) &&
+           (aOther.mStart - aOther.mFuzz <= mEnd + mFuzz);
   }
 
   // Returns true if aOther is strictly to the right of this and contiguous.
   // This operation isn't commutative.
   bool Contiguous(const SelfType& aOther) const
   {
     return mEnd <= aOther.mStart && aOther.mStart - mEnd <= mFuzz + aOther.mFuzz;
   }
@@ -240,19 +240,19 @@ public:
   {
     mFuzz = aFuzz;
   }
 
   // Returns true if the two intervals intersect with this being on the right
   // of aOther
   bool TouchesOnRight(const SelfType& aOther) const
   {
-    return aOther.mStart <= mStart
-           && (mStart - mFuzz <= aOther.mEnd + aOther.mFuzz)
-           && (aOther.mStart - aOther.mFuzz <= mEnd + mFuzz);
+    return aOther.mStart <= mStart &&
+           (mStart - mFuzz <= aOther.mEnd + aOther.mFuzz) &&
+           (aOther.mStart - aOther.mFuzz <= mEnd + mFuzz);
   }
 
   T mStart;
   T mEnd;
   T mFuzz;
 
 private:
 };
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -99,55 +99,52 @@ AudioData::TransferAndUpdateTimestampAnd
                                       aOther->mChannels,
                                       aOther->mRate);
   return v.forget();
 }
 
 static bool
 ValidatePlane(const VideoData::YCbCrBuffer::Plane& aPlane)
 {
-  return aPlane.mWidth <= PlanarYCbCrImage::MAX_DIMENSION
-         && aPlane.mHeight <= PlanarYCbCrImage::MAX_DIMENSION
-         && aPlane.mWidth * aPlane.mHeight < MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT
-         && aPlane.mStride > 0;
+  return aPlane.mWidth <= PlanarYCbCrImage::MAX_DIMENSION &&
+         aPlane.mHeight <= PlanarYCbCrImage::MAX_DIMENSION &&
+         aPlane.mWidth * aPlane.mHeight < MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
+         aPlane.mStride > 0;
 }
 
 static bool ValidateBufferAndPicture(const VideoData::YCbCrBuffer& aBuffer,
                                      const IntRect& aPicture)
 {
   // The following situation should never happen unless there is a bug
   // in the decoder
-  if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth
-      || aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
+  if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
+      aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
     NS_ERROR("C planes with different sizes");
     return false;
   }
 
   // The following situations could be triggered by invalid input
   if (aPicture.width <= 0 || aPicture.height <= 0) {
     // In debug mode, makes the error more noticeable
     MOZ_ASSERT(false, "Empty picture rect");
     return false;
   }
-  if (!ValidatePlane(aBuffer.mPlanes[0])
-      || !ValidatePlane(aBuffer.mPlanes[1])
-      || !ValidatePlane(aBuffer.mPlanes[2])) {
+  if (!ValidatePlane(aBuffer.mPlanes[0]) ||
+      !ValidatePlane(aBuffer.mPlanes[1]) ||
+      !ValidatePlane(aBuffer.mPlanes[2])) {
     NS_WARNING("Invalid plane size");
     return false;
   }
 
   // Ensure the picture size specified in the headers can be extracted out of
   // the frame we've been supplied without indexing out of bounds.
   CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
   CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
-  if (!xLimit.isValid()
-      || xLimit.value() > aBuffer.mPlanes[0].mStride
-      || !yLimit.isValid()
-      || yLimit.value() > aBuffer.mPlanes[0].mHeight)
-  {
+  if (!xLimit.isValid() || xLimit.value() > aBuffer.mPlanes[0].mStride ||
+      !yLimit.isValid() || yLimit.value() > aBuffer.mPlanes[0].mHeight) {
     // The specified picture dimensions can't be contained inside the video
     // frame, we'll stomp memory if we try to copy it. Fail.
     NS_WARNING("Overflowing picture rect");
     return false;
   }
   return true;
 }
 
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -752,22 +752,22 @@ MediaDecoder::EnsureTelemetryReported()
   if (mTelemetryReported || !mInfo) {
     // Note: sometimes we get multiple MetadataLoaded calls (for example
     // for chained ogg). So we ensure we don't report duplicate results for
     // these resources.
     return;
   }
 
   nsTArray<nsCString> codecs;
-  if (mInfo->HasAudio()
-      && !mInfo->mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
+  if (mInfo->HasAudio() &&
+      !mInfo->mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
     codecs.AppendElement(mInfo->mAudio.GetAsAudioInfo()->mMimeType);
   }
-  if (mInfo->HasVideo()
-      && !mInfo->mVideo.GetAsVideoInfo()->mMimeType.IsEmpty()) {
+  if (mInfo->HasVideo() &&
+      !mInfo->mVideo.GetAsVideoInfo()->mMimeType.IsEmpty()) {
     codecs.AppendElement(mInfo->mVideo.GetAsVideoInfo()->mMimeType);
   }
   if (codecs.IsEmpty()) {
     codecs.AppendElement(
       nsPrintfCString("resource; %s", ContainerType().OriginalString().Data()));
   }
   for (const nsCString& codec : codecs) {
     LOG("Telemetry MEDIA_CODEC_USED= '%s'", codec.get());
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -286,18 +286,18 @@ protected:
     auto copiedArgs = MakeTuple(Forward<Ts>(aArgs)...);
 
     // keep mMaster in a local object because mMaster will become invalid after
     // the current state object is deleted.
     auto master = mMaster;
 
     auto* s = new S(master);
 
-    MOZ_ASSERT(GetState() != s->GetState()
-               || GetState() == DECODER_STATE_SEEKING);
+    MOZ_ASSERT(GetState() != s->GetState() ||
+               GetState() == DECODER_STATE_SEEKING);
 
     SLOG("change state to: %s", ToStateStr(s->GetState()));
 
     Exit();
 
     master->mStateObj.reset(s);
     return CallEnterMemberFunction(s, copiedArgs,
                                    typename IndexSequenceFor<Ts...>::Type());
@@ -606,32 +606,31 @@ public:
     }
     mDormantTimer.Reset();
     mOnAudioPopped.DisconnectIfExists();
     mOnVideoPopped.DisconnectIfExists();
   }
 
   void Step() override
   {
-    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING
-        && mMaster->IsPlaying()) {
+    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING &&
+        mMaster->IsPlaying()) {
       // We're playing, but the element/decoder is in paused state. Stop
       // playing!
       mMaster->StopPlayback();
     }
 
     // Start playback if necessary so that the clock can be properly queried.
     if (!mIsPrerolling) {
       mMaster->MaybeStartPlayback();
     }
 
     mMaster->UpdatePlaybackPositionPeriodically();
 
-    MOZ_ASSERT(!mMaster->IsPlaying()
-               || mMaster->IsStateMachineScheduled(),
+    MOZ_ASSERT(!mMaster->IsPlaying() || mMaster->IsStateMachineScheduled(),
                "Must have timer scheduled");
 
     MaybeStartBuffering();
   }
 
   State GetState() const override
   {
     return DECODER_STATE_DECODING;
@@ -745,33 +744,33 @@ private:
 
   uint32_t VideoPrerollFrames() const
   {
     return mMaster->GetAmpleVideoFrames() / 2;
   }
 
   bool DonePrerollingAudio()
   {
-    return !mMaster->IsAudioDecoding()
-           || mMaster->GetDecodedAudioDuration()
-              >= AudioPrerollThreshold().MultDouble(mMaster->mPlaybackRate);
+    return !mMaster->IsAudioDecoding() ||
+           mMaster->GetDecodedAudioDuration()
+           >= AudioPrerollThreshold().MultDouble(mMaster->mPlaybackRate);
   }
 
   bool DonePrerollingVideo()
   {
-    return !mMaster->IsVideoDecoding()
-           || static_cast<uint32_t>(mMaster->VideoQueue().GetSize())
-              >= VideoPrerollFrames() * mMaster->mPlaybackRate + 1;
+    return !mMaster->IsVideoDecoding() ||
+           static_cast<uint32_t>(mMaster->VideoQueue().GetSize()) >=
+             VideoPrerollFrames() * mMaster->mPlaybackRate + 1;
   }
 
   void MaybeStopPrerolling()
   {
-    if (mIsPrerolling
-        && (DonePrerollingAudio() || mMaster->IsWaitingAudioData())
-        && (DonePrerollingVideo() || mMaster->IsWaitingVideoData())) {
+    if (mIsPrerolling &&
+        (DonePrerollingAudio() || mMaster->IsWaitingAudioData()) &&
+        (DonePrerollingVideo() || mMaster->IsWaitingVideoData())) {
       mIsPrerolling = false;
       // Check if we can start playback.
       mMaster->ScheduleStateMachine();
     }
   }
 
   void StartDormantTimer()
   {
@@ -1183,19 +1182,19 @@ protected:
   virtual void RequestVideoData()
   {
     MOZ_ASSERT(!mDoneVideoSeeking);
     mMaster->RequestVideoData(media::TimeUnit());
   }
 
   void AdjustFastSeekIfNeeded(MediaData* aSample)
   {
-    if (mSeekJob.mTarget->IsFast()
-        && mSeekJob.mTarget->GetTime() > mCurrentTimeBeforeSeek
-        && aSample->mTime < mCurrentTimeBeforeSeek) {
+    if (mSeekJob.mTarget->IsFast() &&
+        mSeekJob.mTarget->GetTime() > mCurrentTimeBeforeSeek &&
+        aSample->mTime < mCurrentTimeBeforeSeek) {
       // We are doing a fastSeek, but we ended up *before* the previous
       // playback position. This is surprising UX, so switch to an accurate
       // seek and decode to the seek target. This is not conformant to the
       // spec, fastSeek should always be fast, but until we get the time to
       // change all Readers to seek to the keyframe after the currentTime
       // in this case, we'll just decode forward. Bug 1026330.
       mSeekJob.mTarget->SetType(SeekTarget::Accurate);
     }
@@ -1508,18 +1507,18 @@ private:
   void DoSeekInternal()
   {
     // We don't need to discard frames to the mCurrentTime here because we have
     // done it at DoSeek() and any video data received in between either
     // finishes the seek operation or be discarded, see HandleVideoDecoded().
 
     if (!NeedMoreVideo()) {
       FinishSeek();
-    } else if (!mMaster->IsRequestingVideoData()
-               && !mMaster->IsWaitingVideoData()) {
+    } else if (!mMaster->IsRequestingVideoData() &&
+               !mMaster->IsWaitingVideoData()) {
       RequestVideoData();
     }
   }
 
   class AysncNextFrameSeekTask : public Runnable
   {
   public:
     explicit AysncNextFrameSeekTask(NextFrameSeekingState* aStateObject)
@@ -1546,18 +1545,17 @@ private:
   void RequestVideoData()
   {
     mMaster->RequestVideoData(media::TimeUnit());
   }
 
   bool NeedMoreVideo() const
   {
     // Need to request video when we have none and video queue is not finished.
-    return VideoQueue().GetSize() == 0
-           && !VideoQueue().IsFinished();
+    return VideoQueue().GetSize() == 0 && !VideoQueue().IsFinished();
   }
 
   // Update the seek target's time before resolving this seek task, the updated
   // time will be used in the MDSM::SeekCompleted() to update the MDSM's
   // position.
   void UpdateSeekTargetTime()
   {
     RefPtr<VideoData> data = VideoQueue().PeekFront();
@@ -1889,48 +1887,47 @@ public:
     // we couldn't release it if we still need to render the frame.
 #ifndef MOZ_WIDGET_ANDROID
     if (!mMaster->mLooping) {
       // We've decoded all samples.
       // We don't need decoders anymore if not looping.
       Reader()->ReleaseResources();
     }
 #endif
-    bool hasNextFrame = (!mMaster->HasAudio() || !mMaster->mAudioCompleted)
-                        && (!mMaster->HasVideo() || !mMaster->mVideoCompleted);
+    bool hasNextFrame = (!mMaster->HasAudio() || !mMaster->mAudioCompleted) &&
+                        (!mMaster->HasVideo() || !mMaster->mVideoCompleted);
 
     mMaster->UpdateNextFrameStatus(
       hasNextFrame ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
                    : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE);
 
     Step();
   }
 
   void Exit() override
   {
     mSentPlaybackEndedEvent = false;
   }
 
   void Step() override
   {
-    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING
-        && mMaster->IsPlaying()) {
+    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING &&
+        mMaster->IsPlaying()) {
       mMaster->StopPlayback();
     }
 
     // Play the remaining media. We want to run AdvanceFrame() at least
     // once to ensure the current playback position is advanced to the
     // end of the media, and so that we update the readyState.
-    if ((mMaster->HasVideo() && !mMaster->mVideoCompleted)
-        || (mMaster->HasAudio() && !mMaster->mAudioCompleted)) {
+    if ((mMaster->HasVideo() && !mMaster->mVideoCompleted) ||
+        (mMaster->HasAudio() && !mMaster->mAudioCompleted)) {
       // Start playback if necessary to play the remaining media.
       mMaster->MaybeStartPlayback();
       mMaster->UpdatePlaybackPositionPeriodically();
-      MOZ_ASSERT(!mMaster->IsPlaying()
-                 || mMaster->IsStateMachineScheduled(),
+      MOZ_ASSERT(!mMaster->IsPlaying() || mMaster->IsStateMachineScheduled(),
                  "Must have timer scheduled");
       return;
     }
 
     // StopPlayback in order to reset the IsPlaying() state so audio
     // is restarted correctly.
     mMaster->StopPlayback();
 
@@ -2227,18 +2224,18 @@ DecodingFirstFrameState::Enter()
 }
 
 void
 MediaDecoderStateMachine::
 DecodingFirstFrameState::MaybeFinishDecodeFirstFrame()
 {
   MOZ_ASSERT(!mMaster->mSentFirstFrameLoadedEvent);
 
-  if ((mMaster->IsAudioDecoding() && AudioQueue().GetSize() == 0)
-      || (mMaster->IsVideoDecoding() && VideoQueue().GetSize() == 0)) {
+  if ((mMaster->IsAudioDecoding() && AudioQueue().GetSize() == 0) ||
+      (mMaster->IsVideoDecoding() && VideoQueue().GetSize() == 0)) {
     return;
   }
 
   mMaster->FinishDecodeFirstFrame();
   if (mPendingSeek.Exists()) {
     SetSeekingState(Move(mPendingSeek), EventVisibility::Observable);
   } else {
     SetState<DecodingState>();
@@ -2246,19 +2243,19 @@ DecodingFirstFrameState::MaybeFinishDeco
 }
 
 void
 MediaDecoderStateMachine::
 DecodingState::Enter()
 {
   MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent);
 
-  if (mMaster->mVideoDecodeMode == VideoDecodeMode::Suspend
-      && !mMaster->mVideoDecodeSuspendTimer.IsScheduled()
-      && !mMaster->mVideoDecodeSuspended) {
+  if (mMaster->mVideoDecodeMode == VideoDecodeMode::Suspend &&
+      !mMaster->mVideoDecodeSuspendTimer.IsScheduled() &&
+      !mMaster->mVideoDecodeSuspended) {
     // If the VideoDecodeMode is Suspend and the timer is not schedule, it means
     // the timer has timed out and we should suspend video decoding now if
     // necessary.
     HandleVideoSuspendTimeout();
   }
 
   if (!mMaster->IsVideoDecoding() && !mMaster->IsAudioDecoding()) {
     SetState<CompletedState>();
@@ -2318,48 +2315,48 @@ DecodingState::HandleEndOfVideo()
     MaybeStopPrerolling();
   }
 }
 
 void
 MediaDecoderStateMachine::
 DecodingState::DispatchDecodeTasksIfNeeded()
 {
-  if (mMaster->IsAudioDecoding()
-      && !mMaster->mMinimizePreroll
-      && !mMaster->HaveEnoughDecodedAudio()) {
+  if (mMaster->IsAudioDecoding() &&
+      !mMaster->mMinimizePreroll &&
+      !mMaster->HaveEnoughDecodedAudio()) {
     EnsureAudioDecodeTaskQueued();
   }
 
-  if (mMaster->IsVideoDecoding()
-      && !mMaster->mMinimizePreroll
-      && !mMaster->HaveEnoughDecodedVideo()) {
+  if (mMaster->IsVideoDecoding() &&
+      !mMaster->mMinimizePreroll &&
+      !mMaster->HaveEnoughDecodedVideo()) {
     EnsureVideoDecodeTaskQueued();
   }
 }
 
 void
 MediaDecoderStateMachine::
 DecodingState::EnsureAudioDecodeTaskQueued()
 {
-  if (!mMaster->IsAudioDecoding()
-      || mMaster->IsRequestingAudioData()
-      || mMaster->IsWaitingAudioData()) {
+  if (!mMaster->IsAudioDecoding() ||
+      mMaster->IsRequestingAudioData() ||
+      mMaster->IsWaitingAudioData()) {
     return;
   }
   mMaster->RequestAudioData();
 }
 
 void
 MediaDecoderStateMachine::
 DecodingState::EnsureVideoDecodeTaskQueued()
 {
-  if (!mMaster->IsVideoDecoding()
-      || mMaster->IsRequestingVideoData()
-      || mMaster->IsWaitingVideoData()) {
+  if (!mMaster->IsVideoDecoding() ||
+      mMaster->IsRequestingVideoData() ||
+      mMaster->IsWaitingVideoData()) {
     return;
   }
   mMaster->RequestVideoData(mMaster->GetMediaTime());
 }
 
 void
 MediaDecoderStateMachine::
 DecodingState::MaybeStartBuffering()
@@ -2479,26 +2476,28 @@ BufferingState::Step()
       !mMaster->HasLowBufferedData(TimeUnit::FromSeconds(mBufferingWait));
     if (!stopBuffering) {
       SLOG("Buffering: wait %ds, timeout in %.3lfs",
            mBufferingWait, mBufferingWait - elapsed.ToSeconds());
       mMaster->ScheduleStateMachineIn(TimeUnit::FromMicroseconds(USECS_PER_S));
       return;
     }
   } else if (mMaster->OutOfDecodedAudio() || mMaster->OutOfDecodedVideo()) {
-    MOZ_ASSERT(!mMaster->OutOfDecodedAudio()
-               || mMaster->IsRequestingAudioData()
-               || mMaster->IsWaitingAudioData());
-    MOZ_ASSERT(!mMaster->OutOfDecodedVideo()
-               || mMaster->IsRequestingVideoData()
-               || mMaster->IsWaitingVideoData());
+    MOZ_ASSERT(!mMaster->OutOfDecodedAudio() ||
+               mMaster->IsRequestingAudioData() ||
+               mMaster->IsWaitingAudioData());
+    MOZ_ASSERT(!mMaster->OutOfDecodedVideo() ||
+               mMaster->IsRequestingVideoData() ||
+               mMaster->IsWaitingVideoData());
     SLOG("In buffering mode, waiting to be notified: outOfAudio: %d, "
          "mAudioStatus: %s, outOfVideo: %d, mVideoStatus: %s",
-         mMaster->OutOfDecodedAudio(), mMaster->AudioRequestStatus(),
-         mMaster->OutOfDecodedVideo(), mMaster->VideoRequestStatus());
+         mMaster->OutOfDecodedAudio(),
+         mMaster->AudioRequestStatus(),
+         mMaster->OutOfDecodedVideo(),
+         mMaster->VideoRequestStatus());
     return;
   }
 
   SLOG("Buffered for %.3lfs", (now - mBufferingStart).ToSeconds());
   SetState<DecodingState>();
 }
 
 void
@@ -2733,18 +2732,17 @@ MediaDecoderStateMachine::GetDecodedAudi
   return TimeUnit::FromMicroseconds(AudioQueue().Duration());
 }
 
 bool
 MediaDecoderStateMachine::HaveEnoughDecodedAudio()
 {
   MOZ_ASSERT(OnTaskQueue());
   auto ampleAudio = mAmpleAudioThreshold.MultDouble(mPlaybackRate);
-  return AudioQueue().GetSize() > 0
-         && GetDecodedAudioDuration() >= ampleAudio;
+  return AudioQueue().GetSize() > 0 && GetDecodedAudioDuration() >= ampleAudio;
 }
 
 bool MediaDecoderStateMachine::HaveEnoughDecodedVideo()
 {
   MOZ_ASSERT(OnTaskQueue());
   return VideoQueue().GetSize() >= GetAmpleVideoFrames() * mPlaybackRate + 1;
 }
 
@@ -2909,18 +2907,18 @@ MediaDecoderStateMachine::UpdatePlayback
 }
 
 void
 MediaDecoderStateMachine::UpdatePlaybackPosition(const TimeUnit& aTime)
 {
   MOZ_ASSERT(OnTaskQueue());
   UpdatePlaybackPositionInternal(aTime);
 
-  bool fragmentEnded = mFragmentEndTime.IsValid()
-    && GetMediaTime() >= mFragmentEndTime;
+  bool fragmentEnded =
+    mFragmentEndTime.IsValid() && GetMediaTime() >= mFragmentEndTime;
   mMetadataManager.DispatchMetadataIfNeeded(aTime);
 
   if (fragmentEnded) {
     StopPlayback();
   }
 }
 
 /* static */ const char*
@@ -3254,43 +3252,42 @@ MediaDecoderStateMachine::StartMediaSink
     }
   }
 }
 
 bool
 MediaDecoderStateMachine::HasLowDecodedAudio()
 {
   MOZ_ASSERT(OnTaskQueue());
-  return IsAudioDecoding()
-         && GetDecodedAudioDuration()
-            < EXHAUSTED_DATA_MARGIN.MultDouble(mPlaybackRate);
+  return IsAudioDecoding() && GetDecodedAudioDuration()
+                              < EXHAUSTED_DATA_MARGIN.MultDouble(mPlaybackRate);
 }
 
 bool
 MediaDecoderStateMachine::HasLowDecodedVideo()
 {
   MOZ_ASSERT(OnTaskQueue());
-  return IsVideoDecoding()
-         && VideoQueue().GetSize() < LOW_VIDEO_FRAMES * mPlaybackRate;
+  return IsVideoDecoding() &&
+         VideoQueue().GetSize() < LOW_VIDEO_FRAMES * mPlaybackRate;
 }
 
 bool
 MediaDecoderStateMachine::HasLowDecodedData()
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(mReader->UseBufferingHeuristics());
   return HasLowDecodedAudio() || HasLowDecodedVideo();
 }
 
 bool MediaDecoderStateMachine::OutOfDecodedAudio()
 {
     MOZ_ASSERT(OnTaskQueue());
-    return IsAudioDecoding() && !AudioQueue().IsFinished()
-           && AudioQueue().GetSize() == 0
-           && !mMediaSink->HasUnplayedFrames(TrackInfo::kAudioTrack);
+    return IsAudioDecoding() && !AudioQueue().IsFinished() &&
+           AudioQueue().GetSize() == 0 &&
+           !mMediaSink->HasUnplayedFrames(TrackInfo::kAudioTrack);
 }
 
 bool
 MediaDecoderStateMachine::HasLowBufferedData()
 {
   MOZ_ASSERT(OnTaskQueue());
   return HasLowBufferedData(detail::LOW_BUFFER_THRESHOLD);
 }
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -549,18 +549,18 @@ public:
     , mOwner(WrapNotNull(aOwner)) { }
 
   void CreateDecoder(TrackType aTrack);
 
   // Shutdown any decoder pending initialization and reset mAudio/mVideo to its
   // pristine state so CreateDecoder() is ready to be called again immediately.
   void ShutdownDecoder(TrackType aTrack)
   {
-    MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack
-               || aTrack == TrackInfo::kVideoTrack);
+    MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack ||
+               aTrack == TrackInfo::kVideoTrack);
     auto& data = aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo;
     data.mPolicy->Cancel();
     data.mTokenRequest.DisconnectIfExists();
     data.mInitRequest.DisconnectIfExists();
     if (data.mDecoder) {
       mOwner->mShutdownPromisePool->ShutdownDecoder(data.mDecoder.forget());
     }
     data.mStage = Stage::None;
@@ -600,18 +600,18 @@ private:
 
   // guaranteed to be valid by the owner.
   const NotNull<MediaFormatReader*> mOwner;
 };
 
 void
 MediaFormatReader::DecoderFactory::CreateDecoder(TrackType aTrack)
 {
-  MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack
-             || aTrack == TrackInfo::kVideoTrack);
+  MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack ||
+             aTrack == TrackInfo::kVideoTrack);
   RunStage(aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo);
 }
 
 class MediaFormatReader::DecoderFactory::Wrapper : public MediaDataDecoder
 {
   using Token = GlobalAllocPolicy::Token;
 
 public:
@@ -1391,18 +1391,18 @@ MediaFormatReader::OnDemuxerInitDone(con
     if (!mVideo.mTrackDemuxer) {
       mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
       return;
     }
 
     UniquePtr<TrackInfo> videoInfo = mVideo.mTrackDemuxer->GetInfo();
     videoActive = videoInfo && videoInfo->IsValid();
     if (videoActive) {
-      if (platform
-          && !platform->SupportsMimeType(videoInfo->mMimeType, nullptr)) {
+      if (platform &&
+          !platform->SupportsMimeType(videoInfo->mMimeType, nullptr)) {
         // We have no decoder for this track. Error.
         mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
         return;
       }
       {
         MutexAutoLock lock(mVideo.mMutex);
         mInfo.mVideo = *videoInfo->GetAsVideoInfo();
       }
@@ -1423,20 +1423,18 @@ MediaFormatReader::OnDemuxerInitDone(con
     if (!mAudio.mTrackDemuxer) {
       mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
       return;
     }
 
     UniquePtr<TrackInfo> audioInfo = mAudio.mTrackDemuxer->GetInfo();
     // We actively ignore audio tracks that we know we can't play.
     audioActive =
-      audioInfo
-      && audioInfo->IsValid()
-      && (!platform || platform->SupportsMimeType(audioInfo->mMimeType,
-                                                  nullptr));
+      audioInfo && audioInfo->IsValid() &&
+      (!platform || platform->SupportsMimeType(audioInfo->mMimeType, nullptr));
 
     if (audioActive) {
       {
         MutexAutoLock lock(mAudio.mMutex);
         mInfo.mAudio = *audioInfo->GetAsAudioInfo();
       }
       for (const MetadataTag& tag : audioInfo->mTags) {
         tags->Put(tag.mKey, tag.mValue);
@@ -1503,18 +1501,18 @@ MediaFormatReader::OnDemuxerInitDone(con
   MaybeResolveMetadataPromise();
 }
 
 void
 MediaFormatReader::MaybeResolveMetadataPromise()
 {
   MOZ_ASSERT(OnTaskQueue());
 
-  if ((HasAudio() && mAudio.mFirstDemuxedSampleTime.isNothing())
-      || (HasVideo() && mVideo.mFirstDemuxedSampleTime.isNothing())) {
+  if ((HasAudio() && mAudio.mFirstDemuxedSampleTime.isNothing()) ||
+      (HasVideo() && mVideo.mFirstDemuxedSampleTime.isNothing())) {
     return;
   }
 
   TimeUnit startTime =
     std::min(mAudio.mFirstDemuxedSampleTime.refOr(TimeUnit::FromInfinity()),
              mVideo.mFirstDemuxedSampleTime.refOr(TimeUnit::FromInfinity()));
 
   if (!startTime.IsInfinite()) {
@@ -1531,18 +1529,18 @@ MediaFormatReader::MaybeResolveMetadataP
   UpdateBuffered();
 
   mMetadataPromise.Resolve(Move(metadata), __func__);
 }
 
 bool
 MediaFormatReader::IsEncrypted() const
 {
-  return (HasAudio() && mInfo.mAudio.mCrypto.mValid)
-         || (HasVideo() && mInfo.mVideo.mCrypto.mValid);
+  return (HasAudio() && mInfo.mAudio.mCrypto.mValid) ||
+         (HasVideo() && mInfo.mVideo.mCrypto.mValid);
 }
 
 void
 MediaFormatReader::OnDemuxerInitFailed(const MediaResult& aError)
 {
   mDemuxerInitRequest.Complete();
   mMetadataPromise.Reject(aError, __func__);
 }
@@ -1551,18 +1549,18 @@ void
 MediaFormatReader::ReadUpdatedMetadata(MediaInfo* aInfo)
 {
   *aInfo = mInfo;
 }
 
 MediaFormatReader::DecoderData&
 MediaFormatReader::GetDecoderData(TrackType aTrack)
 {
-  MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack
-             || aTrack == TrackInfo::kVideoTrack);
+  MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack ||
+             aTrack == TrackInfo::kVideoTrack);
   if (aTrack == TrackInfo::kAudioTrack) {
     return mAudio;
   }
   return mVideo;
 }
 
 bool
 MediaFormatReader::ShouldSkip(TimeUnit aTimeThreshold)
@@ -1575,32 +1573,31 @@ MediaFormatReader::ShouldSkip(TimeUnit a
 
   TimeUnit nextKeyframe;
   nsresult rv = mVideo.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe);
   if (NS_FAILED(rv)) {
     // Only OggTrackDemuxer with video type gets into here.
     // We don't support skip-to-next-frame for this case.
     return false;
   }
-  return (nextKeyframe <= aTimeThreshold
-          || (mVideo.mTimeThreshold
-              && mVideo.mTimeThreshold.ref().EndTime() < aTimeThreshold))
-         && nextKeyframe.ToMicroseconds() >= 0
-         && !nextKeyframe.IsInfinite();
+  return (nextKeyframe <= aTimeThreshold ||
+          (mVideo.mTimeThreshold &&
+           mVideo.mTimeThreshold.ref().EndTime() < aTimeThreshold)) &&
+         nextKeyframe.ToMicroseconds() >= 0 && !nextKeyframe.IsInfinite();
 }
 
 RefPtr<MediaFormatReader::VideoDataPromise>
 MediaFormatReader::RequestVideoData(const TimeUnit& aTimeThreshold)
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(),
                         "No sample requests allowed while seeking");
   MOZ_DIAGNOSTIC_ASSERT(!mVideo.HasPromise(), "No duplicate sample requests");
-  MOZ_DIAGNOSTIC_ASSERT(!mVideo.mSeekRequest.Exists()
-                        || mVideo.mTimeThreshold.isSome());
+  MOZ_DIAGNOSTIC_ASSERT(!mVideo.mSeekRequest.Exists() ||
+                        mVideo.mTimeThreshold.isSome());
   MOZ_DIAGNOSTIC_ASSERT(!IsSeeking(), "called mid-seek");
   LOGV("RequestVideoData(%" PRId64 ")", aTimeThreshold.ToMicroseconds());
 
   if (!HasVideo()) {
     LOG("called with no video track");
     return VideoDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                                              __func__);
   }
@@ -1706,19 +1703,18 @@ MediaFormatReader::OnVideoDemuxCompleted
 
 RefPtr<MediaFormatReader::AudioDataPromise>
 MediaFormatReader::RequestAudioData()
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_DIAGNOSTIC_ASSERT(!mAudio.HasPromise(), "No duplicate sample requests");
   MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() || mSeekPromise.IsEmpty(),
                         "No sample requests allowed while seeking");
-  MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking()
-                        || !mAudio.mSeekRequest.Exists()
-                        || mAudio.mTimeThreshold.isSome());
+  MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() || !mAudio.mSeekRequest.Exists() ||
+                        mAudio.mTimeThreshold.isSome());
   MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() || !IsSeeking(), "called mid-seek");
   LOGV("");
 
   if (!HasAudio()) {
     LOG("called with no audio track");
     return AudioDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                                              __func__);
   }
@@ -1866,23 +1862,23 @@ MediaFormatReader::NotifyEndOfStream(Tra
 }
 
 bool
 MediaFormatReader::NeedInput(DecoderData& aDecoder)
 {
   // The decoder will not be fed a new raw sample until the current decoding
   // requests has completed.
   return
-    (aDecoder.HasPromise() || aDecoder.mTimeThreshold.isSome())
-    && !aDecoder.HasPendingDrain()
-    && !aDecoder.HasFatalError()
-    && !aDecoder.mDemuxRequest.Exists()
-    && !aDecoder.mOutput.Length()
-    && !aDecoder.HasInternalSeekPending()
-    && !aDecoder.mDecodeRequest.Exists();
+    (aDecoder.HasPromise() || aDecoder.mTimeThreshold.isSome()) &&
+    !aDecoder.HasPendingDrain() &&
+    !aDecoder.HasFatalError() &&
+    !aDecoder.mDemuxRequest.Exists() &&
+    !aDecoder.mOutput.Length() &&
+    !aDecoder.HasInternalSeekPending() &&
+    !aDecoder.mDecodeRequest.Exists();
 }
 
 void
 MediaFormatReader::ScheduleUpdate(TrackType aTrack)
 {
   MOZ_ASSERT(OnTaskQueue());
   if (mShutdown) {
     return;
@@ -1940,28 +1936,28 @@ MediaFormatReader::UpdateReceivedNewData
     decoder.mTimeThreshold.ref().mWaiting = false;
   }
   decoder.mWaitingForData = false;
 
   if (decoder.HasFatalError()) {
     return false;
   }
 
-  if (!mSeekPromise.IsEmpty()
-      && (!IsVideoSeeking() || aTrack == TrackInfo::kVideoTrack)) {
+  if (!mSeekPromise.IsEmpty() &&
+      (!IsVideoSeeking() || aTrack == TrackInfo::kVideoTrack)) {
     MOZ_ASSERT(!decoder.HasPromise());
     MOZ_DIAGNOSTIC_ASSERT(
       (IsVideoSeeking() || !mAudio.mTimeThreshold) && !mVideo.mTimeThreshold,
       "InternalSeek must have been aborted when Seek was first called");
     MOZ_DIAGNOSTIC_ASSERT(
-      (IsVideoSeeking() || !mAudio.HasWaitingPromise())
-      && !mVideo.HasWaitingPromise(),
+      (IsVideoSeeking() || !mAudio.HasWaitingPromise()) &&
+      !mVideo.HasWaitingPromise(),
       "Waiting promises must have been rejected when Seek was first called");
-    if (mVideo.mSeekRequest.Exists()
-        || (!IsVideoSeeking() && mAudio.mSeekRequest.Exists())) {
+    if (mVideo.mSeekRequest.Exists() ||
+        (!IsVideoSeeking() && mAudio.mSeekRequest.Exists())) {
       // Already waiting for a seek to complete. Nothing more to do.
       return true;
     }
     LOG("Attempting Seek");
     ScheduleSeek();
     return true;
   }
   if (decoder.HasInternalSeekPending() || decoder.HasWaitingPromise()) {
@@ -2060,25 +2056,25 @@ MediaFormatReader::HandleDemuxedSamples(
   LOGV("Giving %s input to decoder", TrackTypeToStr(aTrack));
 
   // Decode all our demuxed frames.
   while (decoder.mQueuedSamples.Length()) {
     RefPtr<MediaRawData> sample = decoder.mQueuedSamples[0];
     RefPtr<TrackInfoSharedPtr> info = sample->mTrackInfo;
 
     if (info && decoder.mLastStreamSourceID != info->GetID()) {
-      bool recyclable = MediaPrefs::MediaDecoderCheckRecycling()
-                        && decoder.mDecoder->SupportDecoderRecycling();
-      if (!recyclable
-          && decoder.mTimeThreshold.isNothing()
-          && (decoder.mNextStreamSourceID.isNothing()
-              || decoder.mNextStreamSourceID.ref() != info->GetID())) {
+      bool recyclable = MediaPrefs::MediaDecoderCheckRecycling() &&
+                        decoder.mDecoder->SupportDecoderRecycling();
+      if (!recyclable && decoder.mTimeThreshold.isNothing() &&
+          (decoder.mNextStreamSourceID.isNothing() ||
+           decoder.mNextStreamSourceID.ref() != info->GetID())) {
         LOG("%s stream id has changed from:%d to:%d, draining decoder.",
-          TrackTypeToStr(aTrack), decoder.mLastStreamSourceID,
-          info->GetID());
+            TrackTypeToStr(aTrack),
+            decoder.mLastStreamSourceID,
+            info->GetID());
         decoder.RequestDrain();
         decoder.mNextStreamSourceID = Some(info->GetID());
         ScheduleUpdate(aTrack);
         return;
       }
 
       LOG("%s stream id has changed from:%d to:%d.",
           TrackTypeToStr(aTrack), decoder.mLastStreamSourceID,
@@ -2182,19 +2178,19 @@ void
 MediaFormatReader::DrainDecoder(TrackType aTrack)
 {
   MOZ_ASSERT(OnTaskQueue());
 
   auto& decoder = GetDecoderData(aTrack);
   if (decoder.mDrainState == DrainState::Draining) {
     return;
   }
-  if (!decoder.mDecoder
-      || (decoder.mDrainState != DrainState::PartialDrainPending
-          && decoder.mNumSamplesInput == decoder.mNumSamplesOutput)) {
+  if (!decoder.mDecoder ||
+      (decoder.mDrainState != DrainState::PartialDrainPending &&
+       decoder.mNumSamplesInput == decoder.mNumSamplesOutput)) {
     // No frames to drain.
     LOGV("Draining %s with nothing to drain", TrackTypeToStr(aTrack));
     decoder.mDrainState = DrainState::DrainAborted;
     ScheduleUpdate(aTrack);
     return;
   }
 
   decoder.mDrainState = DrainState::Draining;
@@ -2252,18 +2248,18 @@ MediaFormatReader::Update(TrackType aTra
   }
 
   if (decoder.mSeekRequest.Exists()) {
     LOGV("Seeking hasn't completed, nothing more to do");
     return;
   }
 
   MOZ_DIAGNOSTIC_ASSERT(
-    !decoder.HasInternalSeekPending()
-    || (!decoder.mOutput.Length() && !decoder.mQueuedSamples.Length()),
+    !decoder.HasInternalSeekPending() ||
+      (!decoder.mOutput.Length() && !decoder.mQueuedSamples.Length()),
     "No frames can be demuxed or decoded while an internal seek is pending");
 
   // Record number of frames decoded and parsed. Automatically update the
   // stats counters using the AutoNotifyDecoded stack-based class.
   FrameStatistics::AutoNotifyDecoded a(mFrameStats);
 
   // Drop any frames found prior our internal seek target.
   while (decoder.mTimeThreshold && decoder.mOutput.Length()) {
@@ -2282,18 +2278,18 @@ MediaFormatReader::Update(TrackType aTra
            output->mTime.ToSeconds(),
            target.Time().ToSeconds(),
            output->mKeyframe);
       decoder.mOutput.RemoveElementAt(0);
       decoder.mSizeOfQueue -= 1;
     }
   }
 
-  while (decoder.mOutput.Length()
-         && decoder.mOutput[0]->mType == MediaData::NULL_DATA) {
+  while (decoder.mOutput.Length() &&
+         decoder.mOutput[0]->mType == MediaData::NULL_DATA) {
     LOGV("Dropping null data. Time: %" PRId64,
          decoder.mOutput[0]->mTime.ToMicroseconds());
     decoder.mOutput.RemoveElementAt(0);
     decoder.mSizeOfQueue -= 1;
   }
 
   if (decoder.HasPromise()) {
     needOutput = true;
@@ -2341,19 +2337,19 @@ MediaFormatReader::Update(TrackType aTra
       LOG("Rejecting %s promise: DECODE_ERROR", TrackTypeToStr(aTrack));
       decoder.RejectPromise(decoder.mError.ref(), __func__);
       return;
     } else if (decoder.HasCompletedDrain()) {
       if (decoder.mDemuxEOS) {
         LOG("Rejecting %s promise: EOS", TrackTypeToStr(aTrack));
         decoder.RejectPromise(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__);
       } else if (decoder.mWaitingForData) {
-        if (decoder.mDrainState == DrainState::DrainCompleted
-            && decoder.mLastDecodedSampleTime
-            && !decoder.mNextStreamSourceID) {
+        if (decoder.mDrainState == DrainState::DrainCompleted &&
+            decoder.mLastDecodedSampleTime &&
+            !decoder.mNextStreamSourceID) {
           // We have completed draining the decoder following WaitingForData.
           // Set up the internal seek machinery to be able to resume from the
           // last sample decoded.
           LOG("Seeking to last sample time: %" PRId64,
               decoder.mLastDecodedSampleTime.ref().mStart.ToMicroseconds());
           InternalSeek(aTrack,
                        InternalSeekTarget(decoder.mLastDecodedSampleTime.ref(), true));
         }
@@ -2367,47 +2363,47 @@ MediaFormatReader::Update(TrackType aTra
 
       // Now that draining has completed, we check if we have received
       // new data again as the result may now be different from the earlier
       // run.
       if (UpdateReceivedNewData(aTrack) || decoder.mSeekRequest.Exists()) {
         LOGV("Nothing more to do");
         return;
       }
-    } else if (decoder.mDemuxEOS
-               && !decoder.HasPendingDrain()
-               && decoder.mQueuedSamples.IsEmpty()) {
+    } else if (decoder.mDemuxEOS &&
+               !decoder.HasPendingDrain() &&
+               decoder.mQueuedSamples.IsEmpty()) {
       // It is possible to transition from WAITING_FOR_DATA directly to EOS
       // state during the internal seek; in which case no draining would occur.
       // There is no more samples left to be decoded and we are already in
       // EOS state. We can immediately reject the data promise.
       LOG("Rejecting %s promise: EOS", TrackTypeToStr(aTrack));
       decoder.RejectPromise(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__);
     } else if (decoder.mWaitingForKey) {
       LOG("Rejecting %s promise: WAITING_FOR_DATA due to waiting for key",
           TrackTypeToStr(aTrack));
       decoder.RejectPromise(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, __func__);
     }
   }
 
-  if (decoder.mDrainState == DrainState::DrainRequested
-      || decoder.mDrainState == DrainState::PartialDrainPending) {
+  if (decoder.mDrainState == DrainState::DrainRequested ||
+      decoder.mDrainState == DrainState::PartialDrainPending) {
     if (decoder.mOutput.IsEmpty()) {
       DrainDecoder(aTrack);
     }
     return;
   }
 
   if (decoder.mError && !decoder.HasFatalError()) {
     MOZ_RELEASE_ASSERT(!decoder.HasInternalSeekPending(),
                        "No error can occur while an internal seek is pending");
     bool needsNewDecoder =
       decoder.mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER;
-    if (!needsNewDecoder
-        && ++decoder.mNumOfConsecutiveError > decoder.mMaxConsecutiveError) {
+    if (!needsNewDecoder &&
+        ++decoder.mNumOfConsecutiveError > decoder.mMaxConsecutiveError) {
       NotifyError(aTrack, decoder.mError.ref());
       return;
     }
     decoder.mError.reset();
 
     LOG("%s decoded error count %d", TrackTypeToStr(aTrack),
         decoder.mNumOfConsecutiveError);
 
@@ -2459,19 +2455,19 @@ MediaFormatReader::Update(TrackType aTra
        decoder.mLastStreamSourceID);
 
   if (IsWaitingOnCDMResource()) {
     // If the content is encrypted, MFR won't start to create decoder until
     // CDMProxy is set.
     return;
   }
 
-  if ((decoder.mWaitingForData
-       && (!decoder.mTimeThreshold || decoder.mTimeThreshold.ref().mWaiting))
-      || (decoder.mWaitingForKey && decoder.mDecodeRequest.Exists())) {
+  if ((decoder.mWaitingForData &&
+       (!decoder.mTimeThreshold || decoder.mTimeThreshold.ref().mWaiting)) ||
+      (decoder.mWaitingForKey && decoder.mDecodeRequest.Exists())) {
     // Nothing more we can do at present.
     LOGV("Still waiting for data or key.");
     return;
   }
 
   if (decoder.CancelWaitingForKey()) {
     LOGV("No longer waiting for key. Resolving waiting promise");
     return;
@@ -2489,44 +2485,48 @@ MediaFormatReader::Update(TrackType aTra
   HandleDemuxedSamples(aTrack, a);
 }
 
 void
 MediaFormatReader::ReturnOutput(MediaData* aData, TrackType aTrack)
 {
   MOZ_ASSERT(GetDecoderData(aTrack).HasPromise());
   MOZ_DIAGNOSTIC_ASSERT(aData->mType != MediaData::NULL_DATA);
-  LOG("Resolved data promise for %s [%" PRId64 ", %" PRId64 "]", TrackTypeToStr(aTrack),
-      aData->mTime.ToMicroseconds(), aData->GetEndTime().ToMicroseconds());
+  LOG("Resolved data promise for %s [%" PRId64 ", %" PRId64 "]",
+      TrackTypeToStr(aTrack),
+      aData->mTime.ToMicroseconds(),
+      aData->GetEndTime().ToMicroseconds());
 
   if (aTrack == TrackInfo::kAudioTrack) {
     AudioData* audioData = static_cast<AudioData*>(aData);
 
-    if (audioData->mChannels != mInfo.mAudio.mChannels
-        || audioData->mRate != mInfo.mAudio.mRate) {
+    if (audioData->mChannels != mInfo.mAudio.mChannels ||
+        audioData->mRate != mInfo.mAudio.mRate) {
       LOG("change of audio format (rate:%d->%d). "
           "This is an unsupported configuration",
-          mInfo.mAudio.mRate, audioData->mRate);
+          mInfo.mAudio.mRate,
+          audioData->mRate);
       mInfo.mAudio.mRate = audioData->mRate;
       mInfo.mAudio.mChannels = audioData->mChannels;
     }
     mAudio.ResolvePromise(audioData, __func__);
   } else if (aTrack == TrackInfo::kVideoTrack) {
     VideoData* videoData = static_cast<VideoData*>(aData);
 
     if (videoData->mDisplay != mInfo.mVideo.mDisplay) {
       LOG("change of video display size (%dx%d->%dx%d)",
           mInfo.mVideo.mDisplay.width, mInfo.mVideo.mDisplay.height,
           videoData->mDisplay.width, videoData->mDisplay.height);
       mInfo.mVideo.mDisplay = videoData->mDisplay;
     }
 
     TimeUnit nextKeyframe;
     if (!mVideo.HasInternalSeekPending() &&
-        NS_SUCCEEDED(mVideo.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe))) {
+        NS_SUCCEEDED(
+          mVideo.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe))) {
       videoData->SetNextKeyFrameTime(nextKeyframe);
     }
 
     mVideo.ResolvePromise(videoData, __func__);
   }
 }
 
 size_t
@@ -2734,18 +2734,18 @@ MediaFormatReader::Seek(const SeekTarget
 
   LOG("aTarget=(%" PRId64 ")", aTarget.GetTime().ToMicroseconds());
 
   MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty());
   MOZ_DIAGNOSTIC_ASSERT(!mVideo.HasPromise());
   MOZ_DIAGNOSTIC_ASSERT(aTarget.IsVideoOnly() || !mAudio.HasPromise());
   MOZ_DIAGNOSTIC_ASSERT(mPendingSeekTime.isNothing());
   MOZ_DIAGNOSTIC_ASSERT(mVideo.mTimeThreshold.isNothing());
-  MOZ_DIAGNOSTIC_ASSERT(aTarget.IsVideoOnly()
-                        || mAudio.mTimeThreshold.isNothing());
+  MOZ_DIAGNOSTIC_ASSERT(aTarget.IsVideoOnly() ||
+                        mAudio.mTimeThreshold.isNothing());
 
   if (!mInfo.mMediaSeekable && !mInfo.mMediaSeekableOnlyInBufferedRanges) {
     LOG("Seek() END (Unseekable)");
     return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
   }
 
   if (mShutdown) {
     return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
@@ -2820,36 +2820,36 @@ MediaFormatReader::OnSeekFailed(TrackTyp
   LOGV("%s failure:%" PRIu32, TrackTypeToStr(aTrack), static_cast<uint32_t>(aError.Code()));
   if (aTrack == TrackType::kVideoTrack) {
     mVideo.mSeekRequest.Complete();
   } else {
     mAudio.mSeekRequest.Complete();
   }
 
   if (aError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) {
-    if (HasVideo()
-        && aTrack == TrackType::kAudioTrack
-        && mFallbackSeekTime.isSome()
-        && mPendingSeekTime.ref() != mFallbackSeekTime.ref()) {
+    if (HasVideo() &&
+        aTrack == TrackType::kAudioTrack &&
+        mFallbackSeekTime.isSome() &&
+        mPendingSeekTime.ref() != mFallbackSeekTime.ref()) {
       // We have failed to seek audio where video seeked to earlier.
       // Attempt to seek instead to the closest point that we know we have in
       // order to limit A/V sync discrepency.
 
       // Ensure we have the most up to date buffered ranges.
       UpdateReceivedNewData(TrackType::kAudioTrack);
       Maybe<TimeUnit> nextSeekTime;
       // Find closest buffered time found after video seeked time.
       for (const auto& timeRange : mAudio.mTimeRanges) {
         if (timeRange.mStart >= mPendingSeekTime.ref()) {
           nextSeekTime.emplace(timeRange.mStart);
           break;
         }
       }
-      if (nextSeekTime.isNothing()
-          || nextSeekTime.ref() > mFallbackSeekTime.ref()) {
+      if (nextSeekTime.isNothing() ||
+          nextSeekTime.ref() > mFallbackSeekTime.ref()) {
         nextSeekTime = Some(mFallbackSeekTime.ref());
         LOG("Unable to seek audio to video seek time. A/V sync may be broken");
       } else {
         mFallbackSeekTime.reset();
       }
       mPendingSeekTime = nextSeekTime;
       DoAudioSeek();
       return;
@@ -3060,32 +3060,32 @@ MediaFormatReader::UpdateBuffered()
     return;
   }
 
   if (HasVideo()) {
     mVideo.mTimeRanges = mVideo.mTrackDemuxer->GetBuffered();
     bool hasLastEnd;
     auto lastEnd = mVideo.mTimeRanges.GetEnd(&hasLastEnd);
     if (hasLastEnd) {
-      if (mVideo.mLastTimeRangesEnd
-          && mVideo.mLastTimeRangesEnd.ref() < lastEnd) {
+      if (mVideo.mLastTimeRangesEnd &&
+          mVideo.mLastTimeRangesEnd.ref() < lastEnd) {
         // New data was added after our previous end, we can clear the EOS flag.
         mVideo.mDemuxEOS = false;
         ScheduleUpdate(TrackInfo::kVideoTrack);
       }
       mVideo.mLastTimeRangesEnd = Some(lastEnd);
     }
   }
   if (HasAudio()) {
     mAudio.mTimeRanges = mAudio.mTrackDemuxer->GetBuffered();
     bool hasLastEnd;
     auto lastEnd = mAudio.mTimeRanges.GetEnd(&hasLastEnd);
     if (hasLastEnd) {
-      if (mAudio.mLastTimeRangesEnd
-          && mAudio.mLastTimeRangesEnd.ref() < lastEnd) {
+      if (mAudio.mLastTimeRangesEnd &&
+          mAudio.mLastTimeRangesEnd.ref() < lastEnd) {
         // New data was added after our previous end, we can clear the EOS flag.
         mAudio.mDemuxEOS = false;
         ScheduleUpdate(TrackInfo::kAudioTrack);
       }
       mAudio.mLastTimeRangesEnd = Some(lastEnd);
     }
   }
 
@@ -3093,18 +3093,17 @@ MediaFormatReader::UpdateBuffered()
   if (HasAudio() && HasVideo()) {
     intervals = media::Intersection(mVideo.mTimeRanges, mAudio.mTimeRanges);
   } else if (HasAudio()) {
     intervals = mAudio.mTimeRanges;
   } else if (HasVideo()) {
     intervals = mVideo.mTimeRanges;
   }
 
-  if (!intervals.Length()
-      || intervals.GetStart() == TimeUnit::Zero()) {
+  if (!intervals.Length() || intervals.GetStart() == TimeUnit::Zero()) {
     // IntervalSet already starts at 0 or is empty, nothing to shift.
     mBuffered = intervals;
   } else {
     mBuffered =
       intervals.Shift(TimeUnit::Zero() - mInfo.mStartTime);
   }
 }
 
--- a/dom/media/MediaFormatReader.h
+++ b/dom/media/MediaFormatReader.h
@@ -463,18 +463,18 @@ private:
     bool HasFatalError() const
     {
       if (!mError.isSome()) {
         return false;
       }
       if (mError.ref() == NS_ERROR_DOM_MEDIA_DECODE_ERR) {
         // Allow decode errors to be non-fatal, but give up
         // if we have too many, or if warnings should be treated as errors.
-        return mNumOfConsecutiveError > mMaxConsecutiveError
-               || MediaPrefs::MediaWarningsAsErrors();
+        return mNumOfConsecutiveError > mMaxConsecutiveError ||
+               MediaPrefs::MediaWarningsAsErrors();
       } else if (mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER) {
         // If the caller asked for a new decoder we shouldn't treat
         // it as fatal.
         return false;
       } else {
         // All other error types are fatal
         return true;
       }
--- a/dom/media/MediaInfo.h
+++ b/dom/media/MediaInfo.h
@@ -279,19 +279,19 @@ public:
   // If aWidth and aHeight are identical to the original mImage.width/mImage.height
   // then the scaling ratio will be 1.
   // This is used for when the frame size is different from what the container
   // reports. This is legal in WebM, and we will preserve the ratio of the crop
   // rectangle as it was reported relative to the picture size reported by the
   // container.
   gfx::IntRect ScaledImageRect(int64_t aWidth, int64_t aHeight) const
   {
-    if ((aWidth == mImage.width && aHeight == mImage.height)
-        || !mImage.width
-        || !mImage.height) {
+    if ((aWidth == mImage.width && aHeight == mImage.height) ||
+        !mImage.width ||
+        !mImage.height) {
       return ImageRect();
     }
     gfx::IntRect imageRect = ImageRect();
     imageRect.x = (imageRect.x * aWidth) / mImage.width;
     imageRect.y = (imageRect.y * aHeight) / mImage.height;
     imageRect.SetWidth((aWidth * imageRect.Width()) / mImage.width);
     imageRect.SetHeight((aHeight * imageRect.Height()) / mImage.height);
     return imageRect;
@@ -365,18 +365,18 @@ public:
     , mExtraData(aOther.mExtraData)
   {
   }
 
   static const uint32_t MAX_RATE = 640000;
 
   bool IsValid() const override
   {
-    return mChannels > 0 && mChannels <= MAX_AUDIO_CHANNELS
-           && mRate > 0 && mRate <= MAX_RATE;
+    return mChannels > 0 && mChannels <= MAX_AUDIO_CHANNELS &&
+           mRate > 0 && mRate <= MAX_RATE;
   }
 
   AudioInfo* GetAsAudioInfo() override
   {
     return this;
   }
 
   const AudioInfo* GetAsAudioInfo() const override
@@ -495,34 +495,33 @@ public:
     // Set dummy values so that HasAudio() will return true;
     // See AudioInfo::IsValid()
     mAudio.mChannels = 2;
     mAudio.mRate = 44100;
   }
 
   bool IsEncrypted() const
   {
-    return (HasAudio() && mAudio.mCrypto.mValid)
-           || (HasVideo() && mVideo.mCrypto.mValid);
+    return (HasAudio() && mAudio.mCrypto.mValid) ||
+           (HasVideo() && mVideo.mCrypto.mValid);
   }
 
   bool HasValidMedia() const
   {
     return HasVideo() || HasAudio();
   }
 
   void AssertValid() const
   {
     NS_ASSERTION(!HasAudio() || mAudio.mTrackId != TRACK_INVALID,
                  "Audio track ID must be valid");
     NS_ASSERTION(!HasVideo() || mVideo.mTrackId != TRACK_INVALID,
                  "Audio track ID must be valid");
-    NS_ASSERTION(!HasAudio()
-                 || !HasVideo()
-                 || mAudio.mTrackId != mVideo.mTrackId,
+    NS_ASSERTION(!HasAudio() || !HasVideo() ||
+                 mAudio.mTrackId != mVideo.mTrackId,
                  "Duplicate track IDs");
   }
 
   // TODO: Store VideoInfo and AudioIndo in arrays to support multi-tracks.
   VideoInfo mVideo;
   AudioInfo mAudio;
 
   // If the metadata includes a duration, we store it here.
@@ -728,20 +727,18 @@ public:
     return mFormat;
   }
   bool Interleaved() const
   {
     return mInterleaved;
   }
   bool operator==(const AudioConfig& aOther) const
   {
-    return mChannelLayout == aOther.mChannelLayout
-      && mRate == aOther.mRate
-      && mFormat == aOther.mFormat
-      && mInterleaved == aOther.mInterleaved;
+    return mChannelLayout == aOther.mChannelLayout && mRate == aOther.mRate &&
+           mFormat == aOther.mFormat && mInterleaved == aOther.mInterleaved;
   }
   bool operator!=(const AudioConfig& aOther) const
   {
     return !(*this == aOther);
   }
 
   bool IsValid() const
   {
--- a/dom/media/VideoUtils.h
+++ b/dom/media/VideoUtils.h
@@ -347,58 +347,58 @@ CreateTrackInfoWithMIMETypeAndContainerT
 
 namespace detail {
 
 // aString should start with aMajor + '/'.
 constexpr bool
 StartsWithMIMETypeMajor(const char* aString,
                         const char* aMajor, size_t aMajorRemaining)
 {
-  return (aMajorRemaining == 0 && *aString == '/')
-         || (*aString == *aMajor
-             && StartsWithMIMETypeMajor(aString + 1,
-                                        aMajor + 1, aMajorRemaining - 1));
+  return (aMajorRemaining == 0 && *aString == '/') ||
+         (*aString == *aMajor && StartsWithMIMETypeMajor(aString + 1,
+                                                         aMajor + 1,
+                                                         aMajorRemaining - 1));
 }
 
 // aString should only contain [a-z0-9\-\.] and a final '\0'.
 constexpr bool
 EndsWithMIMESubtype(const char* aString, size_t aRemaining)
 {
-  return aRemaining == 0
-         || (((*aString >= 'a' && *aString <= 'z')
-              || (*aString >= '0' && *aString <= '9')
-              || *aString == '-'
-              || *aString == '.')
-             && EndsWithMIMESubtype(aString + 1, aRemaining - 1));
+  return aRemaining == 0 ||
+         (((*aString >= 'a' && *aString <= 'z') ||
+           (*aString >= '0' && *aString <= '9') ||
+           *aString == '-' ||
+           *aString == '.') &&
+          EndsWithMIMESubtype(aString + 1, aRemaining - 1));
 }
 
 // Simple MIME-type literal string checker with a given (major) type.
 // Only accepts "{aMajor}/[a-z0-9\-\.]+".
 template <size_t MajorLengthPlus1>
 constexpr bool
 IsMIMETypeWithMajor(const char* aString, size_t aLength,
                     const char (&aMajor)[MajorLengthPlus1])
 {
-  return aLength > MajorLengthPlus1 // Major + '/' + at least 1 char
-         && StartsWithMIMETypeMajor(aString, aMajor, MajorLengthPlus1 - 1)
-         && EndsWithMIMESubtype(aString + MajorLengthPlus1,
-                                aLength - MajorLengthPlus1);
+  return aLength > MajorLengthPlus1 && // Major + '/' + at least 1 char
+         StartsWithMIMETypeMajor(aString, aMajor, MajorLengthPlus1 - 1) &&
+         EndsWithMIMESubtype(aString + MajorLengthPlus1,
+                             aLength - MajorLengthPlus1);
 }
 
 } // namespace detail
 
 // Simple MIME-type string checker.
 // Only accepts lowercase "{application,audio,video}/[a-z0-9\-\.]+".
 // Add more if necessary.
 constexpr bool
 IsMediaMIMEType(const char* aString, size_t aLength)
 {
-  return detail::IsMIMETypeWithMajor(aString, aLength, "application")
-         || detail::IsMIMETypeWithMajor(aString, aLength, "audio")
-         || detail::IsMIMETypeWithMajor(aString, aLength, "video");
+  return detail::IsMIMETypeWithMajor(aString, aLength, "application") ||
+         detail::IsMIMETypeWithMajor(aString, aLength, "audio") ||
+         detail::IsMIMETypeWithMajor(aString, aLength, "video");
 }
 
 // Simple MIME-type string literal checker.
 // Only accepts lowercase "{application,audio,video}/[a-z0-9\-\.]+".
 // Add more if necessary.
 template <size_t LengthPlus1>
 constexpr bool
 IsMediaMIMEType(const char (&aString)[LengthPlus1])
@@ -519,20 +519,23 @@ public:
     Pointer mStart;
     Pointer mEnd;
     Pointer mComma;
   };
 
   explicit StringListRange(const String& aList) : mList(aList) {}
   Iterator begin() const
   {
-    return Iterator(mList.Data()
-                    + ((empties == StringListRangeEmptyItems::ProcessEmptyItems
-                        && mList.Length() == 0) ? 1 : 0),
-                    mList.Length());
+    return Iterator(
+      mList.Data()
+      + ((empties == StringListRangeEmptyItems::ProcessEmptyItems &&
+          mList.Length() == 0)
+          ? 1
+          : 0),
+      mList.Length());
   }
   Iterator end() const
   {
     return Iterator(mList.Data() + mList.Length()
                     + (empties != StringListRangeEmptyItems::Skip ? 1 : 0),
                     0);
   }
 private:
--- a/dom/media/encoder/MediaEncoder.cpp
+++ b/dom/media/encoder/MediaEncoder.cpp
@@ -165,18 +165,18 @@ MediaEncoder::CreateEncoder(const nsAStr
   if (!aTrackTypes) {
     LOG(LogLevel::Error, ("NO TrackTypes!!!"));
     return nullptr;
   }
 #ifdef MOZ_WEBM_ENCODER
   else if (MediaEncoder::IsWebMEncoderEnabled() &&
           (aMIMEType.EqualsLiteral(VIDEO_WEBM) ||
           (aTrackTypes & ContainerWriter::CREATE_VIDEO_TRACK))) {
-    if (aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK
-        && MediaDecoder::IsOpusEnabled()) {
+    if (aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK &&
+        MediaDecoder::IsOpusEnabled()) {
       audioEncoder = new OpusTrackEncoder();
       NS_ENSURE_TRUE(audioEncoder, nullptr);
     }
     videoEncoder = new VP8TrackEncoder(aTrackRate);
     writer = new WebMWriter(aTrackTypes);
     NS_ENSURE_TRUE(writer, nullptr);
     NS_ENSURE_TRUE(videoEncoder, nullptr);
     mimeType = NS_LITERAL_STRING(VIDEO_WEBM);
--- a/dom/media/flac/FlacDecoder.cpp
+++ b/dom/media/flac/FlacDecoder.cpp
@@ -19,15 +19,15 @@ FlacDecoder::IsEnabled()
   // Until bug 1295886 is fixed.
   return false;
 #endif
 }
 
 /* static */ bool
 FlacDecoder::IsSupportedType(const MediaContainerType& aContainerType)
 {
-  return IsEnabled()
-         && (aContainerType.Type() == MEDIAMIMETYPE("audio/flac")
-             || aContainerType.Type() == MEDIAMIMETYPE("audio/x-flac")
-             || aContainerType.Type() == MEDIAMIMETYPE("application/x-flac"));
+  return IsEnabled() &&
+         (aContainerType.Type() == MEDIAMIMETYPE("audio/flac") ||
+          aContainerType.Type() == MEDIAMIMETYPE("audio/x-flac") ||
+          aContainerType.Type() == MEDIAMIMETYPE("application/x-flac"));
 }
 
 } // namespace mozilla
--- a/dom/media/flac/FlacDemuxer.cpp
+++ b/dom/media/flac/FlacDemuxer.cpp
@@ -444,34 +444,33 @@ public:
 
 private:
   bool GetNextFrame(MediaResourceIndex& aResource)
   {
     while (mNextFrame.FindNext(aResource)) {
       // Move our offset slightly, so that we don't find the same frame at the
       // next FindNext call.
       aResource.Seek(SEEK_CUR, mNextFrame.Header().Size());
-      if (mFrame.IsValid()
-          && mNextFrame.Offset() - mFrame.Offset() < FLAC_MAX_FRAME_SIZE
-          && !CheckCRC16AtOffset(mFrame.Offset(),
-                                 mNextFrame.Offset(),
-                                 aResource)) {
+      if (mFrame.IsValid() &&
+          mNextFrame.Offset() - mFrame.Offset() < FLAC_MAX_FRAME_SIZE &&
+          !CheckCRC16AtOffset(
+            mFrame.Offset(), mNextFrame.Offset(), aResource)) {
+        // The frame doesn't match its CRC or would be too far, skip it.
         continue;
       }
       CheckFrameData();
       break;
     }
     return mNextFrame.IsValid();
   }
 
   bool CheckFrameData()
   {
-    if (mNextFrame.Header().Info().mRate == 0
-        || mNextFrame.Header().Info().mBitDepth == 0) {
+    if (mNextFrame.Header().Info().mRate == 0 ||
+        mNextFrame.Header().Info().mBitDepth == 0) {
       if (!Info().IsValid()) {
         // We can only use the STREAMINFO data if we have one.
         mNextFrame.SetInvalid();
       } else {
         if (mNextFrame.Header().Info().mRate == 0) {
           mNextFrame.SetRate(Info().mRate);
         }
         if (mNextFrame.Header().Info().mBitDepth == 0) {
@@ -486,19 +485,18 @@ private:
                           MediaResourceIndex& aResource) const
   {
     int64_t size = aEnd - aStart;
     if (size <= 0) {
       return false;
     }
     UniquePtr<char[]> buffer(new char[size]);
     uint32_t read = 0;
-    if (NS_FAILED(aResource.ReadAt(aStart, buffer.get(),
-                                   size, &read))
-        || read != size) {
+    if (NS_FAILED(aResource.ReadAt(aStart, buffer.get(), size, &read)) ||
+        read != size) {
       NS_WARNING("Couldn't read frame content");
       return false;
     }
 
     uint16_t crc = 0;
     uint8_t* buf = reinterpret_cast<uint8_t*>(buffer.get());
     const uint8_t *end = buf + size;
     while (buf < end) {
@@ -779,18 +777,18 @@ FlacTrackDemuxer::FastSeek(const TimeUni
       // Same frame found twice. We're done.
       break;
     }
     lastFoundOffset = Some(frame.Offset());
 
     if (frame.Time() == aTime) {
       break;
     }
-    if (aTime > frame.Time()
-        && aTime - frame.Time() <= TimeUnit::FromSeconds(GAP_THRESHOLD)) {
+    if (aTime > frame.Time() &&
+        aTime - frame.Time() <= TimeUnit::FromSeconds(GAP_THRESHOLD)) {
       // We're close enough to the target, experimentation shows that bisection
       // search doesn't help much after that.
       break;
     }
     if (frame.Time() > aTime) {
       last = pivot;
       pivot -= (pivot - first) / 2;
     } else {
@@ -808,20 +806,20 @@ FlacTrackDemuxer::FastSeek(const TimeUni
 
 TimeUnit
 FlacTrackDemuxer::ScanUntil(const TimeUnit& aTime)
 {
   LOG("ScanUntil(%f avgFrameLen=%f mParsedFramesDuration=%f offset=%" PRId64,
       aTime.ToSeconds(), AverageFrameLength(),
       mParsedFramesDuration.ToSeconds(), mParser->CurrentFrame().Offset());
 
-   if (!mParser->FirstFrame().IsValid()
-       || aTime <= mParser->FirstFrame().Time()) {
-     return FastSeek(aTime);
-   }
+  if (!mParser->FirstFrame().IsValid() ||
+      aTime <= mParser->FirstFrame().Time()) {
+    return FastSeek(aTime);
+  }
 
   int64_t previousOffset = 0;
   TimeUnit previousTime;
   while (FindNextFrame().IsValid() && mParser->CurrentFrame().Time() < aTime) {
     previousOffset = mParser->CurrentFrame().Offset();
     previousTime = mParser->CurrentFrame().Time();
   }
 
--- a/dom/media/fmp4/MP4Decoder.cpp
+++ b/dom/media/fmp4/MP4Decoder.cpp
@@ -53,21 +53,21 @@ MP4Decoder::IsSupportedType(const MediaC
 {
   if (!IsEnabled()) {
     return false;
   }
 
   // Whitelist MP4 types, so they explicitly match what we encounter on
   // the web, as opposed to what we use internally (i.e. what our demuxers
   // etc output).
-  const bool isAudio = aType.Type() == MEDIAMIMETYPE("audio/mp4")
-                       || aType.Type() == MEDIAMIMETYPE("audio/x-m4a");
-  const bool isVideo = aType.Type() == MEDIAMIMETYPE("video/mp4")
-                       || aType.Type() == MEDIAMIMETYPE("video/quicktime")
-                       || aType.Type() == MEDIAMIMETYPE("video/x-m4v");
+  const bool isAudio = aType.Type() == MEDIAMIMETYPE("audio/mp4") ||
+                       aType.Type() == MEDIAMIMETYPE("audio/x-m4a");
+  const bool isVideo = aType.Type() == MEDIAMIMETYPE("video/mp4") ||
+                       aType.Type() == MEDIAMIMETYPE("video/quicktime") ||
+                       aType.Type() == MEDIAMIMETYPE("video/x-m4v");
 
   if (!isAudio && !isVideo) {
     return false;
   }
 
   nsTArray<UniquePtr<TrackInfo>> trackInfos;
   if (aType.ExtendedType().Codecs().IsEmpty()) {
     // No codecs specified. Assume H.264
--- a/dom/media/fmp4/MP4Demuxer.cpp
+++ b/dom/media/fmp4/MP4Demuxer.cpp
@@ -360,26 +360,25 @@ MP4TrackDemuxer::MP4TrackDemuxer(MP4Demu
                                   mInfo->IsAudio()))
   , mIterator(MakeUnique<mp4_demuxer::SampleIterator>(mIndex))
   , mNeedReIndex(true)
 {
   EnsureUpToDateIndex(); // Force update of index
 
   VideoInfo* videoInfo = mInfo->GetAsVideoInfo();
   // Collect telemetry from h264 AVCC SPS.
-  if (videoInfo
-      && (mInfo->mMimeType.EqualsLiteral("video/mp4")
-          || mInfo->mMimeType.EqualsLiteral("video/avc"))) {
+  if (videoInfo && (mInfo->mMimeType.EqualsLiteral("video/mp4") ||
+                    mInfo->mMimeType.EqualsLiteral("video/avc"))) {
     mIsH264 = true;
     RefPtr<MediaByteBuffer> extraData = videoInfo->mExtraData;
     mNeedSPSForTelemetry = AccumulateSPSTelemetry(extraData);
     mp4_demuxer::SPSData spsdata;
-    if (mp4_demuxer::H264::DecodeSPSFromExtraData(extraData, spsdata)
-        && spsdata.pic_width > 0 && spsdata.pic_height > 0
-        && mp4_demuxer::H264::EnsureSPSIsSane(spsdata)) {
+    if (mp4_demuxer::H264::DecodeSPSFromExtraData(extraData, spsdata) &&
+        spsdata.pic_width > 0 && spsdata.pic_height > 0 &&
+        mp4_demuxer::H264::EnsureSPSIsSane(spsdata)) {
       videoInfo->mImage.width = spsdata.pic_width;
       videoInfo->mImage.height = spsdata.pic_height;
       videoInfo->mDisplay.width = spsdata.display_width;
       videoInfo->mDisplay.height = spsdata.display_height;
     }
   } else {
     // No SPS to be found.
     mNeedSPSForTelemetry = false;
@@ -527,19 +526,18 @@ MP4TrackDemuxer::GetSamples(int32_t aNum
       if (mp4_demuxer::H264::HasSPS(extradata)) {
         RefPtr<MediaByteBuffer> extradata =
           mp4_demuxer::H264::ExtractExtraData(sample);
         mNeedSPSForTelemetry = AccumulateSPSTelemetry(extradata);
       }
     }
   }
 
-  if (mNextKeyframeTime.isNothing()
-      || samples->mSamples.LastElement()->mTime
-      >= mNextKeyframeTime.value()) {
+  if (mNextKeyframeTime.isNothing() ||
+      samples->mSamples.LastElement()->mTime >= mNextKeyframeTime.value()) {
     SetNextKeyFrameTime();
   }
   return SamplesPromise::CreateAndResolve(samples, __func__);
 }
 
 void
 MP4TrackDemuxer::SetNextKeyFrameTime()
 {
--- a/dom/media/gtest/TestVPXDecoding.cpp
+++ b/dom/media/gtest/TestVPXDecoding.cpp
@@ -45,19 +45,18 @@ ParseIVFConfig(nsTArray<uint8_t>& data, 
   if (data[0] != 'D' || data[1] != 'K' || data[2] != 'I' || data[3] != 'F') {
    // Expect 'DKIF'
     return nullptr;
   }
   if (data[4] != 0 || data[5] != 0) {
     // Expect version==0.
     return nullptr;
   }
-  if (data[8] != 'V' || data[9] != 'P'
-      || (data[10] != '8' && data[10] != '9')
-      || data[11] != '0') {
+  if (data[8] != 'V' || data[9] != 'P' ||
+      (data[10] != '8' && data[10] != '9') || data[11] != '0') {
     // Expect 'VP80' or 'VP90'.
     return nullptr;
   }
   config.w = uint32_t(data[12]) || (uint32_t(data[13]) << 8);
   config.h = uint32_t(data[14]) || (uint32_t(data[15]) << 8);
   vpx_codec_iface_t* codec = (data[10] == '8')
                              ? vpx_codec_vp8_dx()
                              : vpx_codec_vp9_dx();
--- a/dom/media/mediasource/MediaSourceDecoder.cpp
+++ b/dom/media/mediasource/MediaSourceDecoder.cpp
@@ -270,18 +270,18 @@ MediaSourceDecoder::GetDuration()
 }
 
 MediaDecoderOwner::NextFrameStatus
 MediaSourceDecoder::NextFrameBufferedStatus()
 {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
 
-  if (!mMediaSource
-      || mMediaSource->ReadyState() == dom::MediaSourceReadyState::Closed) {
+  if (!mMediaSource ||
+      mMediaSource->ReadyState() == dom::MediaSourceReadyState::Closed) {
     return MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
   }
 
   // Next frame hasn't been decoded yet.
   // Use the buffered range to consider if we have the next frame available.
   auto currentPosition = CurrentPosition();
   TimeIntervals buffered = GetBuffered();
   buffered.SetFuzz(MediaSourceDemuxer::EOS_FUZZ / 2);
--- a/dom/media/mediasource/TrackBuffersManager.cpp
+++ b/dom/media/mediasource/TrackBuffersManager.cpp
@@ -827,18 +827,18 @@ TrackBuffersManager::CreateDemuxerforMIM
 
   if (mType.Type() == MEDIAMIMETYPE("video/webm") ||
       mType.Type() == MEDIAMIMETYPE("audio/webm")) {
     mInputDemuxer = new WebMDemuxer(mCurrentInputBuffer, true /* IsMediaSource*/ );
     return;
   }
 
 #ifdef MOZ_FMP4
-  if (mType.Type() == MEDIAMIMETYPE("video/mp4")
-      || mType.Type() == MEDIAMIMETYPE("audio/mp4")) {
+  if (mType.Type() == MEDIAMIMETYPE("video/mp4") ||
+      mType.Type() == MEDIAMIMETYPE("audio/mp4")) {
     mInputDemuxer = new MP4Demuxer(mCurrentInputBuffer);
     return;
   }
 #endif
   NS_WARNING("Not supported (yet)");
 }
 
 // We reset the demuxer by creating a new one and initializing it.
@@ -1770,18 +1770,18 @@ TrackBuffersManager::InsertFrames(TrackB
   // 15. Remove decoding dependencies of the coded frames removed in the previous step:
   // Remove all coded frames between the coded frames removed in the previous step and the next random access point after those removed frames.
 
   TimeIntervals intersection = trackBuffer.mBufferedRanges;
   intersection.Intersection(aIntervals);
 
   if (intersection.Length()) {
     if (aSamples[0]->mKeyframe &&
-        (mType.Type() == MEDIAMIMETYPE("video/webm")
-         || mType.Type() == MEDIAMIMETYPE("audio/webm"))) {
+        (mType.Type() == MEDIAMIMETYPE("video/webm") ||
+         mType.Type() == MEDIAMIMETYPE("audio/webm"))) {
       // We are starting a new GOP, we do not have to worry about breaking an
       // existing current coded frame group. Reset the next insertion index
       // so the search for when to start our frames removal can be exhaustive.
       // This is a workaround for bug 1276184 and only until either bug 1277733
       // or bug 1209386 is fixed.
       // With the webm container, we can't always properly determine the
       // duration of the last frame, which may cause the last frame of a cluster
       // to overlap the following frame.
--- a/dom/media/mp3/MP3Decoder.cpp
+++ b/dom/media/mp3/MP3Decoder.cpp
@@ -16,19 +16,17 @@ MP3Decoder::IsEnabled() {
   RefPtr<PDMFactory> platform = new PDMFactory();
   return platform->SupportsMimeType(NS_LITERAL_CSTRING("audio/mpeg"),
                                     /* DecoderDoctorDiagnostics* */ nullptr);
 }
 
 /* static */
 bool MP3Decoder::IsSupportedType(const MediaContainerType& aContainerType)
 {
-  if (aContainerType.Type() == MEDIAMIMETYPE("audio/mp3")
-      || aContainerType.Type() == MEDIAMIMETYPE("audio/mpeg")) {
-    return
-      IsEnabled()
-      && (aContainerType.ExtendedType().Codecs().IsEmpty()
-          || aContainerType.ExtendedType().Codecs() == "mp3");
+  if (aContainerType.Type() == MEDIAMIMETYPE("audio/mp3") ||
+      aContainerType.Type() == MEDIAMIMETYPE("audio/mpeg")) {
+    return IsEnabled() && (aContainerType.ExtendedType().Codecs().IsEmpty() ||
+                           aContainerType.ExtendedType().Codecs() == "mp3");
   }
   return false;
 }
 
 } // namespace mozilla
--- a/dom/media/mp3/MP3Demuxer.cpp
+++ b/dom/media/mp3/MP3Demuxer.cpp
@@ -473,25 +473,25 @@ MP3TrackDemuxer::FindFirstFrame()
 
 static bool
 VerifyFrameConsistency(const FrameParser::Frame& aFrame1,
                        const FrameParser::Frame& aFrame2)
 {
   const auto& h1 = aFrame1.Header();
   const auto& h2 = aFrame2.Header();
 
-  return h1.IsValid()
-         && h2.IsValid()
-         && h1.Layer() == h2.Layer()
-         && h1.SlotSize() == h2.SlotSize()
-         && h1.SamplesPerFrame() == h2.SamplesPerFrame()
-         && h1.Channels() == h2.Channels()
-         && h1.SampleRate() == h2.SampleRate()
-         && h1.RawVersion() == h2.RawVersion()
-         && h1.RawProtection() == h2.RawProtection();
+  return h1.IsValid() &&
+         h2.IsValid() &&
+         h1.Layer() == h2.Layer() &&
+         h1.SlotSize() == h2.SlotSize() &&
+         h1.SamplesPerFrame() == h2.SamplesPerFrame() &&
+         h1.Channels() == h2.Channels() &&
+         h1.SampleRate() == h2.SampleRate() &&
+         h1.RawVersion() == h2.RawVersion() &&
+         h1.RawProtection() == h2.RawProtection();
 }
 
 MediaByteRange
 MP3TrackDemuxer::FindNextFrame()
 {
   static const int BUFFER_SIZE = 64;
   static const uint32_t MAX_SKIPPABLE_BYTES = 1024 * BUFFER_SIZE;
 
@@ -532,36 +532,35 @@ MP3TrackDemuxer::FindNextFrame()
         maxSkippableBytes += mParser.ID3Header().TotalTagSize();
       }
     } else if (mFrameLock) {
       // We've found a valid MPEG stream, so don't impose any limits
       // to allow skipping corrupted data until we hit EOS.
       maxSkippableBytes = std::numeric_limits<uint32_t>::max();
     }
 
-    if ((mOffset - startOffset > maxSkippableBytes)
-        || (read = Read(buffer, mOffset, BUFFER_SIZE)) == 0) {
+    if ((mOffset - startOffset > maxSkippableBytes) ||
+        (read = Read(buffer, mOffset, BUFFER_SIZE)) == 0) {
       MP3LOG("FindNext() EOS or exceeded maxSkippeableBytes without a frame");
       // This is not a valid MPEG audio stream or we've reached EOS, give up.
       break;
     }
 
     ByteReader reader(buffer, read);
     uint32_t bytesToSkip = 0;
     foundFrame = mParser.Parse(&reader, &bytesToSkip);
     frameHeaderOffset =
       mOffset + reader.Offset() - FrameParser::FrameHeader::SIZE;
 
     // If we've found neither an MPEG frame header nor an ID3v2 tag,
     // the reader shouldn't have any bytes remaining.
     MOZ_ASSERT(foundFrame || bytesToSkip || !reader.Remaining());
 
-    if (foundFrame && mParser.FirstFrame().Length()
-        && !VerifyFrameConsistency(mParser.FirstFrame(),
-                                   mParser.CurrentFrame())) {
+    if (foundFrame && mParser.FirstFrame().Length() &&
+        !VerifyFrameConsistency(mParser.FirstFrame(), mParser.CurrentFrame())) {
       // We've likely hit a false-positive, ignore it and proceed with the
       // search for the next valid frame.
       foundFrame = false;
       mOffset = frameHeaderOffset + 1;
       mParser.EndFrameSession();
     } else {
       // Advance mOffset by the amount of bytes read and if necessary,
       // skip an ID3v2 tag which stretches beyond the current buffer.
--- a/dom/media/mp3/MP3FrameParser.cpp
+++ b/dom/media/mp3/MP3FrameParser.cpp
@@ -396,22 +396,22 @@ bool
 FrameParser::VBRHeader::IsValid() const
 {
   return mType != NONE;
 }
 
 bool
 FrameParser::VBRHeader::IsComplete() const
 {
-  return IsValid()
-         && mNumAudioFrames.valueOr(0) > 0
-         && mNumBytes.valueOr(0) > 0
+  return IsValid() &&
+         mNumAudioFrames.valueOr(0) > 0 &&
+         mNumBytes.valueOr(0) > 0
          // We don't care about the scale for any computations here.
          // && mScale < 101
-         && true;
+         ;
 }
 
 int64_t
 FrameParser::VBRHeader::Offset(float aDurationFac) const
 {
   if (!IsTOCPresent()) {
     return -1;
   }
@@ -694,18 +694,18 @@ ID3Parser::ID3Header::IsValid(int aPos) 
     return true;
   }
   const uint8_t c = mRaw[aPos];
   switch (aPos) {
     case 0: case 1: case 2:
       // Expecting "ID3".
       return id3_header::ID[aPos] == c;
     case 3:
-      return MajorVersion() >= id3_header::MIN_MAJOR_VER
-             && MajorVersion() <= id3_header::MAX_MAJOR_VER;
+      return MajorVersion() >= id3_header::MIN_MAJOR_VER &&
+             MajorVersion() <= id3_header::MAX_MAJOR_VER;
     case 4:
       return MinorVersion() < 0xFF;
     case 5:
       // Validate flags for supported versions, see bug 949036.
       return ((0xFF >> MajorVersion()) & c) == 0;
     case 6: case 7: case 8: case 9:
       return c < 0x80;
   }
@@ -716,18 +716,18 @@ bool
 ID3Parser::ID3Header::IsValid() const
 {
   return mPos >= SIZE;
 }
 
 bool
 ID3Parser::ID3Header::Update(uint8_t c)
 {
-  if (mPos >= id3_header::SIZE_END - id3_header::SIZE_LEN
-      && mPos < id3_header::SIZE_END) {
+  if (mPos >= id3_header::SIZE_END - id3_header::SIZE_LEN &&
+      mPos < id3_header::SIZE_END) {
     mSize <<= 7;
     mSize |= c;
   }
   if (mPos < SIZE) {
     mRaw[mPos] = c;
   }
   return IsValid(mPos++);
 }
--- a/dom/media/ogg/OggCodecState.cpp
+++ b/dom/media/ogg/OggCodecState.cpp
@@ -430,20 +430,20 @@ TheoraState::Time(int64_t granulepos)
 }
 
 bool
 TheoraState::IsHeader(ogg_packet* aPacket)
 {
   return th_packet_isheader(aPacket);
 }
 
-# define TH_VERSION_CHECK(_info,_maj,_min,_sub) \
- (((_info)->version_major>(_maj)||(_info)->version_major==(_maj)) \
-  && (((_info)->version_minor>(_min)||(_info)->version_minor==(_min)) \
-  && (_info)->version_subminor>=(_sub)))
+#define TH_VERSION_CHECK(_info, _maj, _min, _sub)                              \
+  (((_info)->version_major > (_maj) || (_info)->version_major == (_maj)) &&    \
+   (((_info)->version_minor > (_min) || (_info)->version_minor == (_min)) &&   \
+    (_info)->version_subminor >= (_sub)))
 
 int64_t
 TheoraState::Time(th_info* aInfo, int64_t aGranulepos)
 {
   if (aGranulepos < 0 || aInfo->fps_numerator == 0) {
     return -1;
   }
   // Implementation of th_granule_frame inlined here to operate
@@ -596,19 +596,18 @@ TheoraState::ReconstructTheoraGranulepos
     ogg_int64_t frame = firstFrame + i;
     ogg_int64_t granulepos;
     auto& packet = mUnstamped[i];
     bool isKeyframe = th_packet_iskeyframe(packet.get()) == 1;
 
     if (isKeyframe) {
       granulepos = frame << shift;
       keyframe = frame;
-    } else if (frame >= keyframe
-               && frame - keyframe < ((ogg_int64_t)1 << shift))
-    {
+    } else if (frame >= keyframe &&
+               frame - keyframe < ((ogg_int64_t)1 << shift)) {
       // (frame - keyframe) won't overflow the "offset" segment of the
       // granulepos, so it's safe to calculate the granulepos.
       granulepos = (keyframe << shift) + (frame - keyframe);
     } else {
       // (frame - keyframeno) will overflow the "offset" segment of the
       // granulepos, so we take "keyframe" to be the max possible offset
       // frame instead.
       ogg_int64_t k =
@@ -619,33 +618,31 @@ TheoraState::ReconstructTheoraGranulepos
     // should be > 0.
     // Theora 3.2.0 granulepos store the frame index [0..(N-1)], so
     // granulepos should be >= 0.
     NS_ASSERTION(granulepos >= version_3_2_1,
                   "Invalid granulepos for Theora version");
 
     // Check that the frame's granule number is one more than the
     // previous frame's.
-    NS_ASSERTION(i == 0
-                 || th_granule_frame(mCtx, granulepos)
-                    == th_granule_frame(mCtx, mUnstamped[i-1]->granulepos)
-                       + 1,
+    NS_ASSERTION(i == 0 ||
+                 th_granule_frame(mCtx, granulepos) ==
+                 th_granule_frame(mCtx, mUnstamped[i - 1]->granulepos) + 1,
                  "Granulepos calculation is incorrect!");
 
     packet->granulepos = granulepos;
   }
 
   // Check that the second to last frame's granule number is one less than
   // the last frame's (the known granule number). If not our granulepos
   // recovery missed a beat.
   NS_ASSERTION(
-    mUnstamped.Length() < 2
-    || th_granule_frame(mCtx, mUnstamped[mUnstamped.Length() - 2]->granulepos)
-       + 1
-       == th_granule_frame(mCtx, lastGranulepos),
+    mUnstamped.Length() < 2 ||
+    (th_granule_frame(mCtx, mUnstamped[mUnstamped.Length() - 2]->granulepos)
+     + 1) == th_granule_frame(mCtx, lastGranulepos),
     "Granulepos recovery should catch up with packet->granulepos!");
 }
 
 nsresult
 VorbisState::Reset()
 {
   nsresult res = NS_OK;
   if (mActive && vorbis_synthesis_restart(&mDsp) != 0) {
@@ -1127,19 +1124,18 @@ OpusState::Time(int aPreSkip, int64_t aG
   // Ogg Opus always runs at a granule rate of 48 kHz.
   CheckedInt64 t = SaferMultDiv(aGranulepos - aPreSkip, USECS_PER_S, 48000);
   return t.isValid() ? t.value() : -1;
 }
 
 bool
 OpusState::IsHeader(ogg_packet* aPacket)
 {
-  return aPacket->bytes >= 16
-         && (!memcmp(aPacket->packet, "OpusHead", 8)
-             || !memcmp(aPacket->packet, "OpusTags", 8));
+  return aPacket->bytes >= 16 && (!memcmp(aPacket->packet, "OpusHead", 8) ||
+                                  !memcmp(aPacket->packet, "OpusTags", 8));
 }
 
 nsresult
 OpusState::PageIn(ogg_page* aPage)
 {
   if (!mActive) {
     return NS_OK;
   }
@@ -1189,19 +1185,19 @@ OpusState::PacketDuration(ogg_packet* aP
   CheckedInt64 t = SaferMultDiv(GetOpusDeltaGP(aPacket), USECS_PER_S, 48000);
   return t.isValid() ? t.value() : -1;
 }
 
 bool
 OpusState::ReconstructOpusGranulepos(void)
 {
   NS_ASSERTION(mUnstamped.Length() > 0, "Must have unstamped packets");
-  NS_ASSERTION(mUnstamped.LastElement()->e_o_s
-    || mUnstamped.LastElement()->granulepos > 0,
-    "Must know last granulepos!");
+  NS_ASSERTION(mUnstamped.LastElement()->e_o_s ||
+               mUnstamped.LastElement()->granulepos > 0,
+               "Must know last granulepos!");
   int64_t gp;
   // If this is the last page, and we've seen at least one previous page (or
   // this is the first page)...
   if (mUnstamped.LastElement()->e_o_s) {
     auto& last = mUnstamped.LastElement();
     if (mPrevPageGranulepos != -1) {
       // If this file only has one page and the final granule position is
       // smaller than the pre-skip amount, we MUST reject the stream.
@@ -1493,52 +1489,49 @@ static const size_t INDEX_KEYPOINT_OFFSE
 static const size_t FISBONE_MSG_FIELDS_OFFSET = 8;
 static const size_t FISBONE_SERIALNO_OFFSET = 12;
 
 static bool
 IsSkeletonBOS(ogg_packet* aPacket)
 {
   static_assert(SKELETON_MIN_HEADER_LEN >= 8,
                 "Minimum length of skeleton BOS header incorrect");
-  return aPacket->bytes >= SKELETON_MIN_HEADER_LEN
-         && memcmp(reinterpret_cast<char*>(aPacket->packet), "fishead", 8) == 0;
+  return aPacket->bytes >= SKELETON_MIN_HEADER_LEN &&
+         memcmp(reinterpret_cast<char*>(aPacket->packet), "fishead", 8) == 0;
 }
 
 static bool
 IsSkeletonIndex(ogg_packet* aPacket)
 {
   static_assert(SKELETON_4_0_MIN_INDEX_LEN >= 5,
                 "Minimum length of skeleton index header incorrect");
-  return aPacket->bytes >= SKELETON_4_0_MIN_INDEX_LEN
-         && memcmp(reinterpret_cast<char*>(aPacket->packet), "index", 5) == 0;
+  return aPacket->bytes >= SKELETON_4_0_MIN_INDEX_LEN &&
+         memcmp(reinterpret_cast<char*>(aPacket->packet), "index", 5) == 0;
 }
 
 static bool
 IsSkeletonFisbone(ogg_packet* aPacket)
 {
   static_assert(SKELETON_MIN_FISBONE_LEN >= 8,
                 "Minimum length of skeleton fisbone header incorrect");
-  return aPacket->bytes >= SKELETON_MIN_FISBONE_LEN
-         && memcmp(reinterpret_cast<char*>(aPacket->packet), "fisbone", 8) == 0;
+  return aPacket->bytes >= SKELETON_MIN_FISBONE_LEN &&
+         memcmp(reinterpret_cast<char*>(aPacket->packet), "fisbone", 8) == 0;
 }
 
 // Reads a variable length encoded integer at p. Will not read
 // past aLimit. Returns pointer to character after end of integer.
 static const unsigned char*
 ReadVariableLengthInt(const unsigned char* p,
                       const unsigned char* aLimit,
                       int64_t& n)
 {
   int shift = 0;
   int64_t byte = 0;
   n = 0;
-  while (p < aLimit
-         && (byte & 0x80) != 0x80
-         && shift < 57)
-  {
+  while (p < aLimit && (byte & 0x80) != 0x80 && shift < 57) {
     byte = static_cast<int64_t>(*p);
     n |= ((byte & 0x7f) << shift);
     shift += 7;
     p++;
   }
   return p;
 }
 
@@ -1591,19 +1584,19 @@ SkeletonState::DecodeIndex(ogg_packet* a
     (CheckedInt64(numKeyPoints) * MIN_KEY_POINT_SIZE) + INDEX_KEYPOINT_OFFSET;
   if (!minPacketSize.isValid())
   {
     return (mActive = false);
   }
 
   int64_t sizeofIndex = aPacket->bytes - INDEX_KEYPOINT_OFFSET;
   int64_t maxNumKeyPoints = sizeofIndex / MIN_KEY_POINT_SIZE;
-  if (aPacket->bytes < minPacketSize.value()
-      || numKeyPoints > maxNumKeyPoints
-      || numKeyPoints < 0) {
+  if (aPacket->bytes < minPacketSize.value() ||
+      numKeyPoints > maxNumKeyPoints ||
+      numKeyPoints < 0) {
     // Packet size is less than the theoretical minimum size, or the packet is
     // claiming to store more keypoints than it's capable of storing. This means
     // that the numKeyPoints field is too large or small for the packet to
     // possibly contain as many packets as it claims to, so the numKeyPoints
     // field is possibly malicious. Don't try decoding this index, we may run
     // out of memory.
     LOG(LogLevel::Debug, ("Possibly malicious number of key points reported "
                        "(%" PRId64 ") in index packet for stream %u.",
@@ -1618,27 +1611,27 @@ SkeletonState::DecodeIndex(ogg_packet* a
   const unsigned char* limit = aPacket->packet + aPacket->bytes;
   int64_t numKeyPointsRead = 0;
   CheckedInt64 offset = 0;
   CheckedInt64 time = 0;
   while (p < limit && numKeyPointsRead < numKeyPoints) {
     int64_t delta = 0;
     p = ReadVariableLengthInt(p, limit, delta);
     offset += delta;
-    if (p == limit
-        || !offset.isValid()
-        || offset.value() > mLength
-        || offset.value() < 0) {
+    if (p == limit ||
+        !offset.isValid() ||
+        offset.value() > mLength ||
+        offset.value() < 0) {
       return (mActive = false);
     }
     p = ReadVariableLengthInt(p, limit, delta);
     time += delta;
-    if (!time.isValid()
-        || time.value() > endTime
-        || time.value() < startTime) {
+    if (!time.isValid() ||
+        time.value() > endTime ||
+        time.value() < startTime) {
       return (mActive = false);
     }
     CheckedInt64 timeUsecs = SaferMultDiv(time.value(), USECS_PER_S, timeDenom);
     if (!timeUsecs.isValid()) {
       return (mActive = false);
     }
     keyPoints->Add(offset.value(), timeUsecs.value());
     numKeyPointsRead++;
@@ -1657,20 +1650,20 @@ SkeletonState::DecodeIndex(ogg_packet* a
 nsresult
 SkeletonState::IndexedSeekTargetForTrack(uint32_t aSerialno,
                                          int64_t aTarget,
                                          nsKeyPoint& aResult)
 {
   nsKeyFrameIndex* index = nullptr;
   mIndex.Get(aSerialno, &index);
 
-  if (!index
-      || index->Length() == 0
-      || aTarget < index->mStartTime
-      || aTarget > index->mEndTime) {
+  if (!index ||
+      index->Length() == 0 ||
+      aTarget < index->mStartTime ||
+      aTarget > index->mEndTime) {
     return NS_ERROR_FAILURE;
   }
 
   // Binary search to find the last key point with time less than target.
   int start = 0;
   int end = index->Length() - 1;
   while (end > start) {
     int mid = start + ((end - start + 1) >> 1);
@@ -1699,18 +1692,18 @@ SkeletonState::IndexedSeekTarget(int64_t
   }
   // Loop over all requested tracks' indexes, and get the keypoint for that
   // seek target. Record the keypoint with the lowest offset, this will be
   // our seek result. User must seek to the one with lowest offset to ensure we
   // pass "keyframes" on all tracks when we decode forwards to the seek target.
   nsSeekTarget r;
   for (uint32_t i=0; i<aTracks.Length(); i++) {
     nsKeyPoint k;
-    if (NS_SUCCEEDED(IndexedSeekTargetForTrack(aTracks[i], aTarget, k))
-        && k.mOffset < r.mKeyPoint.mOffset) {
+    if (NS_SUCCEEDED(IndexedSeekTargetForTrack(aTracks[i], aTarget, k)) &&
+        k.mOffset < r.mKeyPoint.mOffset) {
       r.mKeyPoint = k;
       r.mSerial = aTracks[i];
     }
   }
   if (r.IsNull()) {
     return NS_ERROR_FAILURE;
   }
   LOG(LogLevel::Debug, ("Indexed seek target for time %" PRId64 " is offset %" PRId64,
@@ -1718,20 +1711,20 @@ SkeletonState::IndexedSeekTarget(int64_t
   aResult = r;
   return NS_OK;
 }
 
 nsresult
 SkeletonState::GetDuration(const nsTArray<uint32_t>& aTracks,
                            int64_t& aDuration)
 {
-  if (!mActive
-      || mVersion < SKELETON_VERSION(4,0)
-      || !HasIndex()
-      || aTracks.Length() == 0) {
+  if (!mActive ||
+      mVersion < SKELETON_VERSION(4,0) ||
+      !HasIndex() ||
+      aTracks.Length() == 0) {
     return NS_ERROR_FAILURE;
   }
   int64_t endTime = INT64_MIN;
   int64_t startTime = INT64_MAX;
   for (uint32_t i=0; i<aTracks.Length(); i++) {
     nsKeyFrameIndex* index = nullptr;
     mIndex.Get(aTracks[i], &index);
     if (!index) {
@@ -1850,19 +1843,19 @@ SkeletonState::DecodeHeader(OggPacketPtr
     int64_t d = LittleEndian::readInt64(
       aPacket->packet + SKELETON_PRESENTATION_TIME_DENOMINATOR_OFFSET);
     mPresentationTime =
       d == 0 ? 0
              : (static_cast<float>(n) / static_cast<float>(d)) * USECS_PER_S;
 
     mVersion = SKELETON_VERSION(verMajor, verMinor);
     // We can only care to parse Skeleton version 4.0+.
-    if (mVersion < SKELETON_VERSION(4,0)
-        || mVersion >= SKELETON_VERSION(5,0)
-        || aPacket->bytes < SKELETON_4_0_MIN_HEADER_LEN) {
+    if (mVersion < SKELETON_VERSION(4,0) ||
+        mVersion >= SKELETON_VERSION(5,0) ||
+        aPacket->bytes < SKELETON_4_0_MIN_HEADER_LEN) {
       return false;
     }
 
     // Extract the segment length.
     mLength =
       LittleEndian::readInt64(aPacket->packet + SKELETON_FILE_LENGTH_OFFSET);
 
     LOG(LogLevel::Debug, ("Skeleton segment length: %" PRId64, mLength));
--- a/dom/media/platforms/PDMFactory.cpp
+++ b/dom/media/platforms/PDMFactory.cpp
@@ -118,19 +118,19 @@ public:
       RefPtr<MediaByteBuffer> extraData =
         aTrackConfig.GetAsVideoInfo()->mExtraData;
       AddToCheckList([mimeType, extraData]() {
         if (MP4Decoder::IsH264(mimeType)) {
           mp4_demuxer::SPSData spsdata;
           // WMF H.264 Video Decoder and Apple ATDecoder
           // do not support YUV444 format.
           // For consistency, all decoders should be checked.
-          if (mp4_demuxer::H264::DecodeSPSFromExtraData(extraData, spsdata)
-              && (spsdata.profile_idc == 244 /* Hi444PP */
-                  || spsdata.chroma_format_idc == PDMFactory::kYUV444)) {
+          if (mp4_demuxer::H264::DecodeSPSFromExtraData(extraData, spsdata) &&
+              (spsdata.profile_idc == 244 /* Hi444PP */ ||
+               spsdata.chroma_format_idc == PDMFactory::kYUV444)) {
             return CheckResult(
               SupportChecker::Reason::kVideoFormatNotSupported,
               MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                           RESULT_DETAIL("Decoder may not have the capability "
                                         "to handle the requested video format "
                                         "with YUV444 chroma subsampling.")));
           }
         }
--- a/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
@@ -19,24 +19,24 @@
 namespace mozilla {
 
 bool
 AgnosticDecoderModule::SupportsMimeType(
   const nsACString& aMimeType,
   DecoderDoctorDiagnostics* aDiagnostics) const
 {
   bool supports =
-    VPXDecoder::IsVPX(aMimeType)
+    VPXDecoder::IsVPX(aMimeType) ||
 #ifdef MOZ_AV1
-    || AOMDecoder::IsAV1(aMimeType)
+    AOMDecoder::IsAV1(aMimeType) ||
 #endif
-    || OpusDataDecoder::IsOpus(aMimeType)
-    || VorbisDataDecoder::IsVorbis(aMimeType)
-    || WaveDataDecoder::IsWave(aMimeType)
-    || TheoraDecoder::IsTheora(aMimeType);
+    OpusDataDecoder::IsOpus(aMimeType) ||
+    VorbisDataDecoder::IsVorbis(aMimeType) ||
+    WaveDataDecoder::IsWave(aMimeType) ||
+    TheoraDecoder::IsTheora(aMimeType);
   MOZ_LOG(sPDMLog, LogLevel::Debug, ("Agnostic decoder %s requested type",
         supports ? "supports" : "rejects"));
   return supports;
 }
 
 already_AddRefed<MediaDataDecoder>
 AgnosticDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
 {
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -90,20 +90,20 @@ BlankAudioDataCreator::BlankAudioDataCre
 
 already_AddRefed<MediaData>
 BlankAudioDataCreator::Create(MediaRawData* aSample)
 {
   // Convert duration to frames. We add 1 to duration to account for
   // rounding errors, so we get a consistent tone.
   CheckedInt64 frames = UsecsToFrames(
     aSample->mDuration.ToMicroseconds()+1, mSampleRate);
-  if (!frames.isValid()
-      || !mChannelCount
-      || !mSampleRate
-      || frames.value() > (UINT32_MAX / mChannelCount)) {
+  if (!frames.isValid() ||
+      !mChannelCount ||
+      !mSampleRate ||
+      frames.value() > (UINT32_MAX / mChannelCount)) {
     return nullptr;
   }
   AlignedAudioBuffer samples(frames.value() * mChannelCount);
   if (!samples) {
     return nullptr;
   }
   // Fill the sound buffer with an A4 tone.
   static const float pi = 3.14159265f;
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -280,20 +280,20 @@ VPXDecoder::DecodeAlpha(vpx_image_t** aI
 
   return NS_OK;
 }
 
 /* static */
 bool
 VPXDecoder::IsVPX(const nsACString& aMimeType, uint8_t aCodecMask)
 {
-  return ((aCodecMask & VPXDecoder::VP8)
-          && aMimeType.EqualsLiteral("video/vp8"))
-         || ((aCodecMask & VPXDecoder::VP9)
-             && aMimeType.EqualsLiteral("video/vp9"));
+  return ((aCodecMask & VPXDecoder::VP8) &&
+          aMimeType.EqualsLiteral("video/vp8")) ||
+         ((aCodecMask & VPXDecoder::VP9) &&
+          aMimeType.EqualsLiteral("video/vp9"));
 }
 
 /* static */
 bool
 VPXDecoder::IsVP8(const nsACString& aMimeType)
 {
   return IsVPX(aMimeType, VPXDecoder::VP8);
 }
--- a/dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp
@@ -47,19 +47,19 @@ CreateDecoderWrapper()
   RefPtr<MediaDataDecoderProxy> decoder(
     new MediaDataDecoderProxy(thread.forget()));
   return decoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
 GMPDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
 {
-  if (!MP4Decoder::IsH264(aParams.mConfig.mMimeType)
-      && !VPXDecoder::IsVP8(aParams.mConfig.mMimeType)
-      && !VPXDecoder::IsVP9(aParams.mConfig.mMimeType)) {
+  if (!MP4Decoder::IsH264(aParams.mConfig.mMimeType) &&
+      !VPXDecoder::IsVP8(aParams.mConfig.mMimeType) &&
+      !VPXDecoder::IsVP9(aParams.mConfig.mMimeType)) {
     return nullptr;
   }
 
   RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper();
   auto params = GMPVideoDecoderParams(aParams);
   wrapper->SetProxyTarget(new GMPVideoDecoder(params));
   return wrapper.forget();
 }
--- a/dom/media/platforms/android/AndroidDecoderModule.cpp
+++ b/dom/media/platforms/android/AndroidDecoderModule.cpp
@@ -134,35 +134,35 @@ AndroidDecoderModule::SupportsMimeType(
   if (aMimeType.EqualsLiteral("video/mp4") ||
       aMimeType.EqualsLiteral("video/avc")) {
     return true;
   }
 
   // When checking "audio/x-wav", CreateDecoder can cause a JNI ERROR by
   // Accessing a stale local reference leading to a SIGSEGV crash.
   // To avoid this we check for wav types here.
-  if (aMimeType.EqualsLiteral("audio/x-wav")
-      || aMimeType.EqualsLiteral("audio/wave; codecs=1")
-      || aMimeType.EqualsLiteral("audio/wave; codecs=6")
-      || aMimeType.EqualsLiteral("audio/wave; codecs=7")
-      || aMimeType.EqualsLiteral("audio/wave; codecs=65534")) {
+  if (aMimeType.EqualsLiteral("audio/x-wav") ||
+      aMimeType.EqualsLiteral("audio/wave; codecs=1") ||
+      aMimeType.EqualsLiteral("audio/wave; codecs=6") ||
+      aMimeType.EqualsLiteral("audio/wave; codecs=7") ||
+      aMimeType.EqualsLiteral("audio/wave; codecs=65534")) {
     return false;
   }
 
-  if ((VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP8)
-       && !GetFeatureStatus(nsIGfxInfo::FEATURE_VP8_HW_DECODE))
-      || (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP9)
-          && !GetFeatureStatus(nsIGfxInfo::FEATURE_VP9_HW_DECODE))) {
+  if ((VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP8) &&
+       !GetFeatureStatus(nsIGfxInfo::FEATURE_VP8_HW_DECODE)) ||
+      (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP9) &&
+       !GetFeatureStatus(nsIGfxInfo::FEATURE_VP9_HW_DECODE))) {
     return false;
   }
 
   // Prefer the gecko decoder for opus and vorbis; stagefright crashes
   // on content demuxed from mp4.
-  if (OpusDataDecoder::IsOpus(aMimeType)
-      || VorbisDataDecoder::IsVorbis(aMimeType)) {
+  if (OpusDataDecoder::IsOpus(aMimeType) ||
+      VorbisDataDecoder::IsVorbis(aMimeType)) {
     LOG("Rejecting audio of type %s", aMimeType.Data());
     return false;
   }
 
   return java::HardwareCodecCapabilityUtils::FindDecoderCodecInfoForMimeType(
     nsCString(TranslateMimeType(aMimeType)));
 }
 
--- a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
@@ -38,18 +38,18 @@ FFmpegLibWrapper::Link()
   if (!isFFMpeg) {
     if (macro == 57) {
       // Due to current AVCodecContext binary incompatibility we can only
       // support FFmpeg 57 at this stage.
       Unlink();
       return LinkResult::CannotUseLibAV57;
     }
 #ifdef MOZ_FFMPEG
-    if (version < (54u << 16 | 35u << 8 | 1u)
-        && !MediaPrefs::LibavcodecAllowObsolete()) {
+    if (version < (54u << 16 | 35u << 8 | 1u) &&
+        !MediaPrefs::LibavcodecAllowObsolete()) {
       // Refuse any libavcodec version prior to 54.35.1.
       // (Unless media.libavcodec.allow-obsolete==true)
       Unlink();
       return LinkResult::BlockedOldLibAVVersion;
     }
 #endif
   }
 
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -80,18 +80,18 @@ FFmpegVideoDecoder<LIBAV_VER>::PtsCorrec
   if (aDts != int64_t(AV_NOPTS_VALUE)) {
     mNumFaultyDts += aDts <= mLastDts;
     mLastDts = aDts;
   }
   if (aPts != int64_t(AV_NOPTS_VALUE)) {
     mNumFaultyPts += aPts <= mLastPts;
     mLastPts = aPts;
   }
-  if ((mNumFaultyPts <= mNumFaultyDts || aDts == int64_t(AV_NOPTS_VALUE))
-      && aPts != int64_t(AV_NOPTS_VALUE)) {
+  if ((mNumFaultyPts <= mNumFaultyDts || aDts == int64_t(AV_NOPTS_VALUE)) &&
+      aPts != int64_t(AV_NOPTS_VALUE)) {
     pts = aPts;
   } else {
     pts = aDts;
   }
   return pts;
 }
 
 void
--- a/dom/media/platforms/wmf/DXVA2Manager.cpp
+++ b/dom/media/platforms/wmf/DXVA2Manager.cpp
@@ -182,20 +182,20 @@ HRESULT ConvertMFTypeToDXVAType(IMFMedia
   hr = MFGetAttributeRatio(
     pType, MF_MT_FRAME_RATE, &fpsNumerator, &fpsDenominator);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
   pDesc->InputSampleFreq.Numerator = fpsNumerator;
   pDesc->InputSampleFreq.Denominator = fpsDenominator;
 
   GetDXVA2ExtendedFormatFromMFMediaType(pType, &pDesc->SampleFormat);
   pDesc->OutputFrameFreq = pDesc->InputSampleFreq;
-  if ((pDesc->SampleFormat.SampleFormat
-       == DXVA2_SampleFieldInterleavedEvenFirst)
-      || (pDesc->SampleFormat.SampleFormat
-          == DXVA2_SampleFieldInterleavedOddFirst)) {
+  if ((pDesc->SampleFormat.SampleFormat ==
+       DXVA2_SampleFieldInterleavedEvenFirst) ||
+      (pDesc->SampleFormat.SampleFormat ==
+       DXVA2_SampleFieldInterleavedOddFirst)) {
     pDesc->OutputFrameFreq.Numerator *= 2;
   }
 
   return S_OK;
 }
 
 static const GUID DXVA2_ModeH264_E = {
   0x1b81be68, 0xa0c7, 0x11d3, { 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5 }
@@ -368,18 +368,18 @@ D3D9DXVA2Manager::Init(layers::KnowsComp
     aFailureReason = nsPrintfCString(
       "IDirectXVideoDecoderServer::GetDecoderDeviceGuids failed with error %X",
       hr);
     return hr;
   }
 
   bool found = false;
   for (UINT i = 0; i < deviceCount; i++) {
-    if (decoderDevices[i] == DXVA2_ModeH264_E
-        || decoderDevices[i] == DXVA2_Intel_ModeH264_E) {
+    if (decoderDevices[i] == DXVA2_ModeH264_E ||
+        decoderDevices[i] == DXVA2_Intel_ModeH264_E) {
       mDecoderGUID = decoderDevices[i];
       found = true;
       break;
     }
   }
   CoTaskMemFree(decoderDevices);
 
   if (!found) {
@@ -819,18 +819,18 @@ D3D11DXVA2Manager::InitInternal(layers::
     return hr;
   }
 
   bool found = false;
   UINT profileCount = videoDevice->GetVideoDecoderProfileCount();
   for (UINT i = 0; i < profileCount; i++) {
     GUID id;
     hr = videoDevice->GetVideoDecoderProfile(i, &id);
-    if (SUCCEEDED(hr)
-        && (id == DXVA2_ModeH264_E || id == DXVA2_Intel_ModeH264_E)) {
+    if (SUCCEEDED(hr) &&
+        (id == DXVA2_ModeH264_E || id == DXVA2_Intel_ModeH264_E)) {
       mDecoderGUID = id;
       found = true;
       break;
     }
   }
   if (!found) {
     aFailureReason.AssignLiteral("Failed to find an appropriate decoder GUID");
     return E_FAIL;
@@ -1258,14 +1258,14 @@ DXVA2Manager::~DXVA2Manager()
 bool
 DXVA2Manager::IsUnsupportedResolution(const uint32_t& aWidth,
                                       const uint32_t& aHeight,
                                       const float& aFramerate) const
 {
   // AMD cards with UVD3 or earlier perform poorly trying to decode 1080p60 in
   // hardware, so use software instead. Pick 45 as an arbitrary upper bound for
   // the framerate we can handle.
-  return mIsAMDPreUVD4
-         && (aWidth >= 1920 || aHeight >= 1088)
-         && aFramerate > 45;
+  return mIsAMDPreUVD4 &&
+         (aWidth >= 1920 || aHeight >= 1088) &&
+         aFramerate > 45;
 }
 
 } // namespace mozilla
--- a/dom/media/platforms/wmf/WMFDecoderModule.cpp
+++ b/dom/media/platforms/wmf/WMFDecoderModule.cpp
@@ -229,34 +229,31 @@ bool
 WMFDecoderModule::Supports(const TrackInfo& aTrackInfo,
                            DecoderDoctorDiagnostics* aDiagnostics) const
 {
   if ((aTrackInfo.mMimeType.EqualsLiteral("audio/mp4a-latm") ||
        aTrackInfo.mMimeType.EqualsLiteral("audio/mp4")) &&
        WMFDecoderModule::HasAAC()) {
     return true;
   }
-  if (MP4Decoder::IsH264(aTrackInfo.mMimeType)
-      && WMFDecoderModule::HasH264()) {
+  if (MP4Decoder::IsH264(aTrackInfo.mMimeType) && WMFDecoderModule::HasH264()) {
     if (!MediaPrefs::PDMWMFAllowUnsupportedResolutions()) {
       const VideoInfo* videoInfo = aTrackInfo.GetAsVideoInfo();
       MOZ_ASSERT(videoInfo);
       // Check Windows format constraints, based on:
       // https://msdn.microsoft.com/en-us/library/windows/desktop/dd797815(v=vs.85).aspx
       if (IsWin8OrLater() || IsWin7H264Decoder4KCapable()) {
         // Windows >7, and Win7 with recent-enough decoder, support at most
         // 4096x2304.
-        if (videoInfo->mImage.width > 4096
-            || videoInfo->mImage.height > 2304) {
+        if (videoInfo->mImage.width > 4096 || videoInfo->mImage.height > 2304) {
           return false;
         }
       } else {
         // Windows <=7 (with original decoder) supports at most 1920x1088.
-        if (videoInfo->mImage.width > 1920
-            || videoInfo->mImage.height > 1088) {
+        if (videoInfo->mImage.width > 1920 || videoInfo->mImage.height > 1088) {
           return false;
         }
       }
     }
     return true;
   }
   if (aTrackInfo.mMimeType.EqualsLiteral("audio/mpeg") &&
       CanCreateWMFDecoder<CLSID_CMP3DecMediaObject>()) {
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -326,19 +326,19 @@ FindDXVABlacklistedDLL(
       if (infoSize == 0) {
         // Can't get file info -> Assume we don't have the blacklisted DLL.
         continue;
       }
       // vInfo is a pointer into infoData, that's why we keep it outside of the loop.
       auto infoData = MakeUnique<unsigned char[]>(infoSize);
       VS_FIXEDFILEINFO *vInfo;
       UINT vInfoLen;
-      if (!GetFileVersionInfoW(dllPath, 0, infoSize, infoData.get())
-          || !VerQueryValueW(infoData.get(), L"\\", (LPVOID*)&vInfo, &vInfoLen)
-          || !vInfo) {
+      if (!GetFileVersionInfoW(dllPath, 0, infoSize, infoData.get()) ||
+          !VerQueryValueW(infoData.get(), L"\\", (LPVOID*)&vInfo, &vInfoLen) ||
+          !vInfo) {
         // Can't find version -> Assume it's not blacklisted.
         continue;
       }
 
       nsTArray<nsCString> versions;
       SplitAt(",", nameAndVersions[1], versions);
       for (const auto& version : versions) {
         nsTArray<nsCString> numberStrings;
@@ -367,18 +367,18 @@ FindDXVABlacklistedDLL(
         if (NS_FAILED(errorCode)) {
           NS_WARNING(
             nsPrintfCString("Skipping incorrect '%s' a.b.c.d version format",
                             aDLLBlacklistPrefName)
             .get());
           continue;
         }
 
-        if (vInfo->dwFileVersionMS == ((numbers[0] << 16) | numbers[1])
-            && vInfo->dwFileVersionLS == ((numbers[2] << 16) | numbers[3])) {
+        if (vInfo->dwFileVersionMS == ((numbers[0] << 16) | numbers[1]) &&
+            vInfo->dwFileVersionLS == ((numbers[2] << 16) | numbers[3])) {
           // Blacklisted! Record bad DLL.
           aDLLBlacklistingCache->mBlacklistedDLL.SetLength(0);
           aDLLBlacklistingCache->mBlacklistedDLL.AppendPrintf(
             "%s (%lu.%lu.%lu.%lu)",
             nameAndVersions[0].get(),
             numbers[0],
             numbers[1],
             numbers[2],
@@ -527,19 +527,19 @@ bool
 WMFVideoMFTManager::ValidateVideoInfo()
 {
   // The WMF H.264 decoder is documented to have a minimum resolution
   // 48x48 pixels. We've observed the decoder working for output smaller than
   // that, but on some output it hangs in IMFTransform::ProcessOutput(), so
   // we just reject streams which are less than the documented minimum.
   // https://msdn.microsoft.com/en-us/library/windows/desktop/dd797815(v=vs.85).aspx
   static const int32_t MIN_H264_FRAME_DIMENSION = 48;
-  if (mStreamType == H264
-      && (mVideoInfo.mImage.width < MIN_H264_FRAME_DIMENSION
-          || mVideoInfo.mImage.height < MIN_H264_FRAME_DIMENSION)) {
+  if (mStreamType == H264 &&
+      (mVideoInfo.mImage.width < MIN_H264_FRAME_DIMENSION ||
+       mVideoInfo.mImage.height < MIN_H264_FRAME_DIMENSION)) {
     LogToBrowserConsole(NS_LITERAL_STRING(
       "Can't decode H.264 stream with width or height less than 48 pixels."));
     mIsValid = false;
   }
 
   return mIsValid;
 }
 
--- a/dom/media/platforms/wrappers/H264Converter.cpp
+++ b/dom/media/platforms/wrappers/H264Converter.cpp
@@ -57,18 +57,18 @@ H264Converter::Init()
            TrackType::kVideoTrack, __func__);
 }
 
 RefPtr<MediaDataDecoder::DecodePromise>
 H264Converter::Decode(MediaRawData* aSample)
 {
   MOZ_RELEASE_ASSERT(mFlushPromise.IsEmpty(), "Flush operatin didn't complete");
 
-  MOZ_RELEASE_ASSERT(!mDecodePromiseRequest.Exists()
-                     && !mInitPromiseRequest.Exists(),
+  MOZ_RELEASE_ASSERT(!mDecodePromiseRequest.Exists() &&
+                       !mInitPromiseRequest.Exists(),
                      "Can't request a new decode until previous one completed");
 
   if (!mp4_demuxer::AnnexB::ConvertSampleToAVCC(aSample)) {
     // We need AVCC content to be able to later parse the SPS.
     // This is a no-op if the data is already AVCC.
     return DecodePromise::CreateAndReject(
       MediaResult(NS_ERROR_OUT_OF_MEMORY, RESULT_DETAIL("ConvertSampleToAVCC")),
       __func__);
@@ -117,18 +117,18 @@ H264Converter::Decode(MediaRawData* aSam
                   RESULT_DETAIL("Unable to create H264 decoder")),
       __func__);
   }
 
   if (mNeedKeyframe && !aSample->mKeyframe) {
     return DecodePromise::CreateAndResolve(DecodedData(), __func__);
   }
 
-  if (!*mNeedAVCC
-      && !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)) {
+  if (!*mNeedAVCC &&
+      !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)) {
     return DecodePromise::CreateAndReject(
       MediaResult(NS_ERROR_OUT_OF_MEMORY,
                   RESULT_DETAIL("ConvertSampleToAnnexB")),
       __func__);
   }
 
   mNeedKeyframe = false;
 
@@ -252,18 +252,18 @@ H264Converter::CreateDecoder(const Video
     return NS_ERROR_NOT_INITIALIZED;
   }
   UpdateConfigFromExtraData(aConfig.mExtraData);
 
   mp4_demuxer::SPSData spsdata;
   if (mp4_demuxer::H264::DecodeSPSFromExtraData(aConfig.mExtraData, spsdata)) {
     // Do some format check here.
     // WMF H.264 Video Decoder and Apple ATDecoder do not support YUV444 format.
-    if (spsdata.profile_idc == 244 /* Hi444PP */
-        || spsdata.chroma_format_idc == PDMFactory::kYUV444) {
+    if (spsdata.profile_idc == 244 /* Hi444PP */ ||
+        spsdata.chroma_format_idc == PDMFactory::kYUV444) {
       mLastError = NS_ERROR_FAILURE;
       if (aDiagnostics) {
         aDiagnostics->SetVideoNotSupported();
       }
       return NS_ERROR_FAILURE;
     }
   } else {
     // SPS was invalid.
@@ -351,35 +351,34 @@ H264Converter::CreateDecoderAndInit(Medi
   }
   return rv;
 }
 
 bool
 H264Converter::CanRecycleDecoder() const
 {
   MOZ_ASSERT(mDecoder);
-  return MediaPrefs::MediaDecoderCheckRecycling()
-         && mDecoder->SupportDecoderRecycling();
+  return MediaPrefs::MediaDecoderCheckRecycling() &&
+         mDecoder->SupportDecoderRecycling();
 }
 
 void
 H264Converter::DecodeFirstSample(MediaRawData* aSample)
 {
   if (mNeedKeyframe && !aSample->mKeyframe) {
     mDecodePromise.Resolve(mPendingFrames, __func__);
     mPendingFrames.Clear();
     return;
   }
 
-  if (!*mNeedAVCC
-      && !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)) {
-    mDecodePromise.Reject(
-      MediaResult(NS_ERROR_OUT_OF_MEMORY,
-                  RESULT_DETAIL("ConvertSampleToAnnexB")),
-      __func__);
+  if (!*mNeedAVCC &&
+      !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)) {
+    mDecodePromise.Reject(MediaResult(NS_ERROR_OUT_OF_MEMORY,
+                                      RESULT_DETAIL("ConvertSampleToAnnexB")),
+                          __func__);
     return;
   }
 
   mNeedKeyframe = false;
 
   RefPtr<H264Converter> self = this;
   mDecoder->Decode(aSample)
     ->Then(AbstractThread::GetCurrent()->AsTaskQueue(), __func__,
@@ -531,19 +530,18 @@ void H264Converter::FlushThenShutdownDec
            })
     ->Track(mFlushRequest);
 }
 
 void
 H264Converter::UpdateConfigFromExtraData(MediaByteBuffer* aExtraData)
 {
   mp4_demuxer::SPSData spsdata;
-  if (mp4_demuxer::H264::DecodeSPSFromExtraData(aExtraData, spsdata)
-      && spsdata.pic_width > 0
-      && spsdata.pic_height > 0) {
+  if (mp4_demuxer::H264::DecodeSPSFromExtraData(aExtraData, spsdata) &&
+      spsdata.pic_width > 0 && spsdata.pic_height > 0) {
     mp4_demuxer::H264::EnsureSPSIsSane(spsdata);
     mCurrentConfig.mImage.width = spsdata.pic_width;
     mCurrentConfig.mImage.height = spsdata.pic_height;
     mCurrentConfig.mDisplay.width = spsdata.display_width;
     mCurrentConfig.mDisplay.height = spsdata.display_height;
   }
   mCurrentConfig.mExtraData = aExtraData;
 }
--- a/dom/media/systemservices/CamerasParent.cpp
+++ b/dom/media/systemservices/CamerasParent.cpp
@@ -690,18 +690,19 @@ CamerasParent::RecvAllocateCaptureDevice
   RefPtr<CamerasParent> self(this);
   RefPtr<Runnable> mainthread_runnable =
     media::NewRunnableFrom([self, aCapEngine, unique_id, aPrincipalInfo]() -> nsresult {
       // Verify whether the claimed origin has received permission
       // to use the camera, either persistently or this session (one shot).
       bool allowed = HasCameraPermission(aPrincipalInfo);
       if (!allowed) {
         // Developer preference for turning off permission check.
-        if (Preferences::GetBool("media.navigator.permission.disabled", false)
-            || Preferences::GetBool("media.navigator.permission.fake")) {
+        if (Preferences::GetBool("media.navigator.permission.disabled",
+                                 false) ||
+            Preferences::GetBool("media.navigator.permission.fake")) {
           allowed = true;
           LOG(("No permission but checks are disabled or fake sources active"));
         } else {
           LOG(("No camera permission for this origin"));
         }
       }
       // After retrieving the permission (or not) on the main thread,
       // bounce to the WebRTC thread to allocate the device (or not),
@@ -855,20 +856,20 @@ CamerasParent::StopCapture(const Capture
     engine->WithEntry(capnum,[](VideoEngine::CaptureEntry& cap){
       if (cap.VideoCapture()) {
         cap.VideoCapture()->StopCapture();
         cap.VideoCapture()->DeRegisterCaptureDataCallback();
       }
     });
     // we're removing elements, iterate backwards
     for (size_t i = mCallbacks.Length(); i > 0; i--) {
-      if (mCallbacks[i-1]->mCapEngine == aCapEngine
-          && mCallbacks[i-1]->mStreamId == (uint32_t) capnum) {
-        delete mCallbacks[i-1];
-        mCallbacks.RemoveElementAt(i-1);
+      if (mCallbacks[i - 1]->mCapEngine == aCapEngine &&
+          mCallbacks[i - 1]->mStreamId == (uint32_t)capnum) {
+        delete mCallbacks[i - 1];
+        mCallbacks.RemoveElementAt(i - 1);
         break;
       }
     }
     engine->Shutdown();
   }
 }
 
 mozilla::ipc::IPCResult
--- a/dom/media/systemservices/CamerasParent.h
+++ b/dom/media/systemservices/CamerasParent.h
@@ -99,19 +99,20 @@ public:
                                                    const VideoCaptureCapability&) override;
   virtual mozilla::ipc::IPCResult RecvStopCapture(const CaptureEngine&, const int&) override;
   virtual mozilla::ipc::IPCResult RecvReleaseFrame(mozilla::ipc::Shmem&&) override;
   virtual mozilla::ipc::IPCResult RecvAllDone() override;
   virtual void ActorDestroy(ActorDestroyReason aWhy) override;
   virtual mozilla::ipc::IPCResult RecvEnsureInitialized(const CaptureEngine&) override;
 
   nsIEventTarget* GetBackgroundEventTarget() { return mPBackgroundEventTarget; };
-  bool IsShuttingDown() { return !mChildIsAlive
-                              ||  mDestroyed
-                              || !mWebRTCAlive; };
+  bool IsShuttingDown()
+  {
+    return !mChildIsAlive || mDestroyed || !mWebRTCAlive;
+  };
   ShmemBuffer GetBuffer(size_t aSize);
 
   // helper to forward to the PBackground thread
   int DeliverFrameOverIPC(CaptureEngine capEng,
                           uint32_t aStreamId,
                           ShmemBuffer buffer,
                           unsigned char* altbuffer,
                           VideoFrameProperties& aProps);
--- a/dom/media/systemservices/VideoFrameUtils.cpp
+++ b/dom/media/systemservices/VideoFrameUtils.cpp
@@ -51,25 +51,27 @@ void VideoFrameUtils::CopyVideoFrameBuff
                        const size_t aDestBufferSize,
                        const webrtc::VideoFrame& aFrame)
 {
   size_t aggregateSize = TotalRequiredBufferSize(aFrame);
 
   MOZ_ASSERT(aDestBufferSize >= aggregateSize);
 
   // If planes are ordered YUV and contiguous then do a single copy
-  if ((aFrame.video_frame_buffer()->DataY() != nullptr)
+  if ((aFrame.video_frame_buffer()->DataY() != nullptr) &&
       // Check that the three planes are ordered
-      && (aFrame.video_frame_buffer()->DataY() < aFrame.video_frame_buffer()->DataU())
-      && (aFrame.video_frame_buffer()->DataU() < aFrame.video_frame_buffer()->DataV())
+      (aFrame.video_frame_buffer()->DataY() <
+       aFrame.video_frame_buffer()->DataU()) &&
+      (aFrame.video_frame_buffer()->DataU() <
+       aFrame.video_frame_buffer()->DataV()) &&
       //  Check that the last plane ends at firstPlane[totalsize]
-      && (&aFrame.video_frame_buffer()->DataY()[aggregateSize] ==
-          &aFrame.video_frame_buffer()->DataV()[((aFrame.video_frame_buffer()->height()+1)/2) *
-                                                aFrame.video_frame_buffer()->StrideV()]))
-  {
+      (&aFrame.video_frame_buffer()->DataY()[aggregateSize] ==
+       &aFrame.video_frame_buffer()
+          ->DataV()[((aFrame.video_frame_buffer()->height() + 1) / 2) *
+                    aFrame.video_frame_buffer()->StrideV()])) {
     memcpy(aDestBuffer, aFrame.video_frame_buffer()->DataY(), aggregateSize);
     return;
   }
 
   // Copy each plane
   size_t offset = 0;
   size_t size;
   auto height = aFrame.video_frame_buffer()->height();
--- a/dom/media/wave/WaveDecoder.cpp
+++ b/dom/media/wave/WaveDecoder.cpp
@@ -11,22 +11,22 @@
 namespace mozilla {
 
 /* static */ bool
 WaveDecoder::IsSupportedType(const MediaContainerType& aContainerType)
 {
   if (!MediaDecoder::IsWaveEnabled()) {
     return false;
   }
-  if (aContainerType.Type() == MEDIAMIMETYPE("audio/wave")
-      || aContainerType.Type() == MEDIAMIMETYPE("audio/x-wav")
-      || aContainerType.Type() == MEDIAMIMETYPE("audio/wav")
-      || aContainerType.Type() == MEDIAMIMETYPE("audio/x-pn-wav")) {
-    return (aContainerType.ExtendedType().Codecs().IsEmpty()
-            || aContainerType.ExtendedType().Codecs() == "1"
-            || aContainerType.ExtendedType().Codecs() == "6"
-            || aContainerType.ExtendedType().Codecs() == "7");
+  if (aContainerType.Type() == MEDIAMIMETYPE("audio/wave") ||
+      aContainerType.Type() == MEDIAMIMETYPE("audio/x-wav") ||
+      aContainerType.Type() == MEDIAMIMETYPE("audio/wav") ||
+      aContainerType.Type() == MEDIAMIMETYPE("audio/x-pn-wav")) {
+    return (aContainerType.ExtendedType().Codecs().IsEmpty() ||
+            aContainerType.ExtendedType().Codecs() == "1" ||
+            aContainerType.ExtendedType().Codecs() == "6" ||
+            aContainerType.ExtendedType().Codecs() == "7");
   }
 
   return false;
 }
 
 } // namespace mozilla
--- a/dom/media/wave/WaveDemuxer.cpp
+++ b/dom/media/wave/WaveDemuxer.cpp
@@ -859,18 +859,18 @@ FormatParser::FormatChunk::ParseNext(uin
 {
   Update(c);
   return IsValid();
 }
 
 bool
 FormatParser::FormatChunk::IsValid() const
 {
-  return (FrameSize() == SampleRate() * Channels() / 8)
-         && (mPos >= FMT_CHUNK_MIN_SIZE);
+  return (FrameSize() == SampleRate() * Channels() / 8) &&
+         (mPos >= FMT_CHUNK_MIN_SIZE);
 }
 
 void
 FormatParser::FormatChunk::Update(uint8_t c)
 {
   if (mPos < FMT_CHUNK_MIN_SIZE) {
     mRaw[mPos++] = c;
   }
--- a/dom/media/webaudio/ScriptProcessorNode.cpp
+++ b/dom/media/webaudio/ScriptProcessorNode.cpp
@@ -540,19 +540,19 @@ JSObject*
 ScriptProcessorNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 {
   return ScriptProcessorNodeBinding::Wrap(aCx, this, aGivenProto);
 }
 
 void
 ScriptProcessorNode::UpdateConnectedStatus()
 {
-  bool isConnected = mHasPhantomInput ||
-    !(OutputNodes().IsEmpty() && OutputParams().IsEmpty()
-      && InputNodes().IsEmpty());
+  bool isConnected =
+    mHasPhantomInput || !(OutputNodes().IsEmpty() && OutputParams().IsEmpty() &&
+                          InputNodes().IsEmpty());
 
   // Events are queued even when there is no listener because a listener
   // may be added while events are in the queue.
   SendInt32ParameterToStream(ScriptProcessorNodeEngine::IS_CONNECTED,
                              isConnected);
 
   if (isConnected && HasListenersFor(nsGkAtoms::onaudioprocess)) {
     MarkActive();
--- a/dom/media/webaudio/blink/Reverb.cpp
+++ b/dom/media/webaudio/blink/Reverb.cpp
@@ -146,18 +146,22 @@ void Reverb::initialize(const nsTArray<c
         WriteZeroesToAudioBlock(&m_tempBuffer, 0, WEBAUDIO_BLOCK_SIZE);
     }
 }
 
 void Reverb::process(const AudioBlock* sourceBus, AudioBlock* destinationBus)
 {
     // Do a fairly comprehensive sanity check.
     // If these conditions are satisfied, all of the source and destination pointers will be valid for the various matrixing cases.
-    bool isSafeToProcess = sourceBus && destinationBus && sourceBus->ChannelCount() > 0 && destinationBus->mChannelData.Length() > 0
-        && WEBAUDIO_BLOCK_SIZE <= MaxFrameSize && WEBAUDIO_BLOCK_SIZE <= size_t(sourceBus->GetDuration()) && WEBAUDIO_BLOCK_SIZE <= size_t(destinationBus->GetDuration());
+    bool isSafeToProcess =
+      sourceBus && destinationBus && sourceBus->ChannelCount() > 0 &&
+      destinationBus->mChannelData.Length() > 0 &&
+      WEBAUDIO_BLOCK_SIZE <= MaxFrameSize &&
+      WEBAUDIO_BLOCK_SIZE <= size_t(sourceBus->GetDuration()) &&
+      WEBAUDIO_BLOCK_SIZE <= size_t(destinationBus->GetDuration());
 
     MOZ_ASSERT(isSafeToProcess);
     if (!isSafeToProcess)
         return;
 
     // For now only handle mono or stereo output
     MOZ_ASSERT(destinationBus->ChannelCount() <= 2);
 
--- a/dom/media/webaudio/blink/ReverbConvolver.cpp
+++ b/dom/media/webaudio/blink/ReverbConvolver.cpp
@@ -113,29 +113,30 @@ ReverbConvolver::ReverbConvolver(const f
         } else
             m_stages.AppendElement(stage.forget());
 
         // Figure out next FFT size
         fftSize *= 2;
 
         stageOffset += stageSize;
 
-        if (hasRealtimeConstraint && !isBackgroundStage
-            && fftSize > MaxRealtimeFFTSize) {
-            fftSize = MaxRealtimeFFTSize;
-            // Custom phase positions for all but the first of the realtime
-            // stages of largest size.  These spread out the work of the
-            // larger realtime stages.  None of the FFTs of size 1024, 2048 or
-            // 4096 are performed when processing the same block.  The first
-            // MaxRealtimeFFTSize = 4096 stage, at the end of the doubling,
-            // performs its FFT at block 7.  The FFTs of size 2048 are
-            // performed in blocks 3 + 8 * n and size 1024 at 1 + 4 * n.
-            const uint32_t phaseLookup[] = { 14, 0, 10, 4 };
-            stagePhase = WEBAUDIO_BLOCK_SIZE *
-                phaseLookup[m_stages.Length() % ArrayLength(phaseLookup)];
+        if (hasRealtimeConstraint && !isBackgroundStage &&
+            fftSize > MaxRealtimeFFTSize) {
+          fftSize = MaxRealtimeFFTSize;
+          // Custom phase positions for all but the first of the realtime
+          // stages of largest size.  These spread out the work of the
+          // larger realtime stages.  None of the FFTs of size 1024, 2048 or
+          // 4096 are performed when processing the same block.  The first
+          // MaxRealtimeFFTSize = 4096 stage, at the end of the doubling,
+          // performs its FFT at block 7.  The FFTs of size 2048 are
+          // performed in blocks 3 + 8 * n and size 1024 at 1 + 4 * n.
+          const uint32_t phaseLookup[] = { 14, 0, 10, 4 };
+          stagePhase =
+            WEBAUDIO_BLOCK_SIZE *
+            phaseLookup[m_stages.Length() % ArrayLength(phaseLookup)];
         } else if (fftSize > maxFFTSize) {
             fftSize = maxFFTSize;
             // A prime offset spreads out FFTs in a way that all
             // available phase positions will be used if there are sufficient
             // stages.
             stagePhase += 5 * WEBAUDIO_BLOCK_SIZE;
         } else if (stageSize > WEBAUDIO_BLOCK_SIZE) {
             // As the stages are doubling in size, the next FFT will occur
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -191,18 +191,18 @@ WebMDemuxer::Init()
 {
   InitBufferedState();
 
   if (NS_FAILED(ReadMetadata())) {
     return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                                         __func__);
   }
 
-  if (!GetNumberTracks(TrackInfo::kAudioTrack)
-      && !GetNumberTracks(TrackInfo::kVideoTrack)) {
+  if (!GetNumberTracks(TrackInfo::kAudioTrack) &&
+      !GetNumberTracks(TrackInfo::kVideoTrack)) {
     return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                                         __func__);
   }
 
   return InitPromise::CreateAndResolve(NS_OK, __func__);
 }
 
 void
@@ -292,18 +292,18 @@ WebMDemuxer::ReadMetadata()
     }
     mBufferedState->NotifyDataArrived(buffer->Elements(), buffer->Length(), 0);
     if (mBufferedState->GetInitEndOffset() < 0) {
       return NS_ERROR_FAILURE;
     }
     MOZ_ASSERT(mBufferedState->GetInitEndOffset() <= resource.Tell());
   }
   mInitData = resource.MediaReadAt(0, mBufferedState->GetInitEndOffset());
-  if (!mInitData
-      || mInitData->Length() != size_t(mBufferedState->GetInitEndOffset())) {
+  if (!mInitData ||
+      mInitData->Length() != size_t(mBufferedState->GetInitEndOffset())) {
     return NS_ERROR_FAILURE;
   }
 
   unsigned int ntracks = 0;
   r = nestegg_track_count(context, &ntracks);
   if (r == -1) {
     return NS_ERROR_FAILURE;
   }
@@ -340,20 +340,20 @@ WebMDemuxer::ReadMetadata()
       unsigned int cropH = params.crop_right + params.crop_left;
       unsigned int cropV = params.crop_bottom + params.crop_top;
       gfx::IntRect pictureRect(params.crop_left,
                                params.crop_top,
                                params.width - cropH,
                                params.height - cropV);
 
       // If the cropping data appears invalid then use the frame data
-      if (pictureRect.width <= 0
-          || pictureRect.height <= 0
-          || pictureRect.x < 0
-          || pictureRect.y < 0) {
+      if (pictureRect.width <= 0 ||
+          pictureRect.height <= 0 ||
+          pictureRect.x < 0 ||
+          pictureRect.y < 0) {
         pictureRect.x = 0;
         pictureRect.y = 0;
         pictureRect.width = params.width;
         pictureRect.height = params.height;
       }
 
       // Validate the container-reported frame and pictureRect sizes. This
       // ensures that our video frame creation code doesn't overflow.
@@ -468,25 +468,25 @@ WebMDemuxer::ReadMetadata()
     }
   }
   return NS_OK;
 }
 
 bool
 WebMDemuxer::IsSeekable() const
 {
-  return Context(TrackInfo::kVideoTrack)
-         && nestegg_has_cues(Context(TrackInfo::kVideoTrack));
+  return Context(TrackInfo::kVideoTrack) &&
+         nestegg_has_cues(Context(TrackInfo::kVideoTrack));
 }
 
 bool
 WebMDemuxer::IsSeekableOnlyInBufferedRanges() const
 {
-  return Context(TrackInfo::kVideoTrack)
-         && !nestegg_has_cues(Context(TrackInfo::kVideoTrack));
+  return Context(TrackInfo::kVideoTrack) &&
+         !nestegg_has_cues(Context(TrackInfo::kVideoTrack));
 }
 
 void
 WebMDemuxer::EnsureUpToDateIndex()
 {
   if (!mNeedReIndex || !mInitData) {
     return;
   }
@@ -601,18 +601,18 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
     if (NS_FAILED(rv) && rv != NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
       return rv;
     }
     if (next_holder) {
       next_tstamp = next_holder->Timestamp();
       PushAudioPacket(next_holder);
     } else if (duration >= 0) {
       next_tstamp = tstamp + duration;
-    } else if (!mIsMediaSource
-               || (mIsMediaSource && mLastAudioFrameTime.isSome())) {
+    } else if (!mIsMediaSource ||
+               (mIsMediaSource && mLastAudioFrameTime.isSome())) {
       next_tstamp = tstamp;
       next_tstamp += tstamp - mLastAudioFrameTime.refOr(0);
     } else {
       PushAudioPacket(holder);
     }
     mLastAudioFrameTime = Some(tstamp);
   } else if (aType == TrackInfo::kVideoTrack) {
     RefPtr<NesteggPacketHolder> next_holder;
@@ -620,18 +620,18 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
     if (NS_FAILED(rv) && rv != NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
       return rv;
     }
     if (next_holder) {
       next_tstamp = next_holder->Timestamp();
       PushVideoPacket(next_holder);
     } else if (duration >= 0) {
       next_tstamp = tstamp + duration;
-    } else if (!mIsMediaSource
-               || (mIsMediaSource && mLastVideoFrameTime.isSome())) {
+    } else if (!mIsMediaSource ||
+               (mIsMediaSource && mLastVideoFrameTime.isSome())) {
       next_tstamp = tstamp;
       next_tstamp += tstamp - mLastVideoFrameTime.refOr(0);
     } else {
       PushVideoPacket(holder);
     }
     mLastVideoFrameTime = Some(tstamp);
   }
 
@@ -707,18 +707,18 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
             dimensions = VPXDecoder::GetFrameSize(sample, VPXDecoder::Codec::VP9);
             break;
 #ifdef MOZ_AV1
           case NESTEGG_CODEC_AV1:
             dimensions = AOMDecoder::GetFrameSize(sample);
             break;
 #endif
           }
-          if (mLastSeenFrameSize.isSome()
-              && (dimensions != mLastSeenFrameSize.value())) {
+          if (mLastSeenFrameSize.isSome() &&
+              (dimensions != mLastSeenFrameSize.value())) {
             mInfo.mVideo.mDisplay = dimensions;
             mSharedVideoTrackInfo =
               new TrackInfoSharedPtr(mInfo.mVideo, ++sStreamSourceID);
           }
           mLastSeenFrameSize = Some(dimensions);
         }
       }
     }
@@ -756,19 +756,19 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
         discardFrames = TimeUnitToFrames(
           TimeUnit::FromNanoseconds(discardPadding), mInfo.mAudio.mRate);
       }
       if (discardFrames.isValid()) {
         sample->mDiscardPadding = discardFrames.value();
       }
     }
 
-    if (packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_UNENCRYPTED
-        || packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_ENCRYPTED
-        || packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_PARTITIONED) {
+    if (packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_UNENCRYPTED ||
+        packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_ENCRYPTED ||
+        packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_PARTITIONED) {
       nsAutoPtr<MediaRawDataWriter> writer(sample->CreateWriter());
       unsigned char const* iv;
       size_t ivLength;
       nestegg_packet_iv(holder->Packet(), &iv, &ivLength);
       writer->mCrypto.mValid = true;
       writer->mCrypto.mIVSize = ivLength;
       if (ivLength == 0) {
         // Frame is not encrypted
@@ -1183,18 +1183,18 @@ WebMTrackDemuxer::SetNextKeyFrameTime()
     if (sample->mKeyframe) {
       frameTime = sample->mTime;
       foundKeyframe = true;
     }
     int64_t sampleTimecode = sample->mTimecode.ToMicroseconds();
     skipSamplesQueue.Push(sample.forget());
     if (!startTime) {
       startTime.emplace(sampleTimecode);
-    } else if (!foundKeyframe
-               && sampleTimecode > startTime.ref() + MAX_LOOK_AHEAD) {
+    } else if (!foundKeyframe &&
+               sampleTimecode > startTime.ref() + MAX_LOOK_AHEAD) {
       WEBM_DEBUG("Couldn't find keyframe in a reasonable time, aborting");
       break;
     }
   }
   // We may have demuxed more than intended, so ensure that all frames are kept
   // in the right order.
   mSamples.PushFront(Move(skipSamplesQueue));
 
@@ -1231,18 +1231,18 @@ WebMTrackDemuxer::UpdateSamples(nsTArray
   for (const auto& sample : aSamples) {
     if (sample->mCrypto.mValid) {
       nsAutoPtr<MediaRawDataWriter> writer(sample->CreateWriter());
       writer->mCrypto.mMode = mInfo->mCrypto.mMode;
       writer->mCrypto.mIVSize = mInfo->mCrypto.mIVSize;
       writer->mCrypto.mKeyId.AppendElements(mInfo->mCrypto.mKeyId);
     }
   }
-  if (mNextKeyframeTime.isNothing()
-      || aSamples.LastElement()->mTime >= mNextKeyframeTime.value()) {
+  if (mNextKeyframeTime.isNothing() ||
+      aSamples.LastElement()->mTime >= mNextKeyframeTime.value()) {
     SetNextKeyFrameTime();
   }
 }
 
 nsresult
 WebMTrackDemuxer::GetNextRandomAccessPoint(TimeUnit* aTime)
 {
   if (mNextKeyframeTime.isNothing()) {
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -355,18 +355,18 @@ MediaEngineWebRTCMicrophoneSource::Updat
         MOZ_ASSERT(mSources.Length() > 0);
         auto& source = mSources.LastElement();
         mAudioInput->SetUserChannelCount(prefs.mChannels);
         // Get validated number of channel
         uint32_t channelCount = 0;
         mAudioInput->GetChannelCount(channelCount);
         MOZ_ASSERT(channelCount > 0 && mLastPrefs.mChannels > 0);
         // Check if new validated channels is the same as previous
-        if (static_cast<uint32_t>(mLastPrefs.mChannels) != channelCount
-            && !source->OpenNewAudioCallbackDriver(mListener)) {
+        if (static_cast<uint32_t>(mLastPrefs.mChannels) != channelCount &&
+            !source->OpenNewAudioCallbackDriver(mListener)) {
           return NS_ERROR_FAILURE;
         }
         // Update settings
         prefs.mChannels = channelCount;
       }
 
       if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
         MonitorAutoLock lock(mMonitor);
--- a/dom/media/webspeech/recognition/SpeechRecognition.cpp
+++ b/dom/media/webspeech/recognition/SpeechRecognition.cpp
@@ -96,17 +96,20 @@ GetSpeechRecognitionService(const nsAStr
   }
 
   nsresult rv;
   nsCOMPtr<nsISpeechRecognitionService> recognitionService;
   recognitionService = do_GetService(speechRecognitionServiceCID.get(), &rv);
   return recognitionService.forget();
 }
 
-NS_IMPL_CYCLE_COLLECTION_INHERITED(SpeechRecognition, DOMEventTargetHelper, mDOMStream, mSpeechGrammarList)
+NS_IMPL_CYCLE_COLLECTION_INHERITED(SpeechRecognition,
+                                   DOMEventTargetHelper,
+                                   mDOMStream,
+                                   mSpeechGrammarList)
 
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(SpeechRecognition)
   NS_INTERFACE_MAP_ENTRY(nsIObserver)
 NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
 
 NS_IMPL_ADDREF_INHERITED(SpeechRecognition, DOMEventTargetHelper)
 NS_IMPL_RELEASE_INHERITED(SpeechRecognition, DOMEventTargetHelper)
 
@@ -156,32 +159,35 @@ SpeechRecognition::WrapObject(JSContext*
 }
 
 bool
 SpeechRecognition::IsAuthorized(JSContext* aCx, JSObject* aGlobal)
 {
   nsCOMPtr<nsIPrincipal> principal = nsContentUtils::ObjectPrincipal(aGlobal);
 
   nsresult rv;
-  nsCOMPtr<nsIPermissionManager> mgr = do_GetService(NS_PERMISSIONMANAGER_CONTRACTID, &rv);
+  nsCOMPtr<nsIPermissionManager> mgr =
+    do_GetService(NS_PERMISSIONMANAGER_CONTRACTID, &rv);
   if (NS_WARN_IF(NS_FAILED(rv))) {
     return false;
   }
 
   uint32_t speechRecognition = nsIPermissionManager::UNKNOWN_ACTION;
-  rv = mgr->TestExactPermissionFromPrincipal(principal, "speech-recognition", &speechRecognition);
+  rv = mgr->TestExactPermissionFromPrincipal(
+    principal, "speech-recognition", &speechRecognition);
   if (NS_WARN_IF(NS_FAILED(rv))) {
     return false;
   }
 
-  bool hasPermission = (speechRecognition == nsIPermissionManager::ALLOW_ACTION);
+  bool hasPermission =
+    (speechRecognition == nsIPermissionManager::ALLOW_ACTION);
 
-  return (hasPermission || MediaPrefs::WebSpeechRecognitionForceEnabled()
-          || MediaPrefs::WebSpeechTestEnabled())
-         && MediaPrefs::WebSpeechRecognitionEnabled();
+  return (hasPermission || MediaPrefs::WebSpeechRecognitionForceEnabled() ||
+          MediaPrefs::WebSpeechTestEnabled()) &&
+         MediaPrefs::WebSpeechRecognitionEnabled();
 }
 
 already_AddRefed<SpeechRecognition>
 SpeechRecognition::Constructor(const GlobalObject& aGlobal,
                                ErrorResult& aRv)
 {
   nsCOMPtr<nsPIDOMWindowInner> win = do_QueryInterface(aGlobal.GetAsSupports());
   if (!win) {
@@ -360,33 +366,35 @@ SpeechRecognition::Transition(SpeechEven
         case EVENT_AUDIO_DATA:
           DoNothing(aEvent);
           break;
         case EVENT_ABORT:
           AbortSilently(aEvent);
           break;
         case EVENT_START:
         case EVENT_RECOGNITIONSERVICE_INTERMEDIATE_RESULT:
-          SR_LOG("STATE_WAITING_FOR_RESULT: Unhandled aEvent %s", GetName(aEvent));
+          SR_LOG("STATE_WAITING_FOR_RESULT: Unhandled aEvent %s",
+                 GetName(aEvent));
           MOZ_CRASH();
         case EVENT_COUNT:
           MOZ_CRASH("Invalid event EVENT_COUNT");
       }
       break;
     case STATE_COUNT:
       MOZ_CRASH("Invalid state STATE_COUNT");
   }
 }
 
 /*
  * Handle a segment of recorded audio data.
  * Returns the number of samples that were processed.
  */
 uint32_t
-SpeechRecognition::ProcessAudioSegment(AudioSegment* aSegment, TrackRate aTrackRate)
+SpeechRecognition::ProcessAudioSegment(AudioSegment* aSegment,
+                                       TrackRate aTrackRate)
 {
   AudioSegment::ChunkIterator iterator(*aSegment);
   uint32_t samples = 0;
   while (!iterator.IsEnded()) {
     float out;
     mEndpointer.ProcessAudio(*iterator, &out);
     samples += iterator->GetDuration();
     iterator.Next();
@@ -436,17 +444,18 @@ SpeechRecognition::WaitForAudioData(Spee
 }
 
 void
 SpeechRecognition::StartedAudioCapture(SpeechEvent* aEvent)
 {
   SetState(STATE_ESTIMATING);
 
   mEndpointer.SetEnvironmentEstimationMode();
-  mEstimationSamples += ProcessAudioSegment(aEvent->mAudioSegment, aEvent->mTrackRate);
+  mEstimationSamples +=
+    ProcessAudioSegment(aEvent->mAudioSegment, aEvent->mTrackRate);
 
   DispatchTrustedEvent(NS_LITERAL_STRING("audiostart"));
   if (mCurrentState == STATE_ESTIMATING) {
     DispatchTrustedEvent(NS_LITERAL_STRING("start"));
   }
 }
 
 void
@@ -460,17 +469,18 @@ SpeechRecognition::StopRecordingAndRecog
   StopRecording();
 }
 
 void
 SpeechRecognition::WaitForEstimation(SpeechEvent* aEvent)
 {
   SetState(STATE_ESTIMATING);
 
-  mEstimationSamples += ProcessAudioSegment(aEvent->mAudioSegment, aEvent->mTrackRate);
+  mEstimationSamples +=
+    ProcessAudioSegment(aEvent->mAudioSegment, aEvent->mTrackRate);
   if (mEstimationSamples > kESTIMATION_SAMPLES) {
     mEndpointer.SetUserInputMode();
     SetState(STATE_WAITING_FOR_SPEECH);
   }
 }
 
 void
 SpeechRecognition::DetectSpeech(SpeechEvent* aEvent)
@@ -510,18 +520,18 @@ SpeechRecognition::NotifyFinalResult(Spe
   RootedDictionary<SpeechRecognitionEventInit> init(RootingCx());
   init.mBubbles = true;
   init.mCancelable = false;
   // init.mResultIndex = 0;
   init.mResults = aEvent->mRecognitionResultList;
   init.mInterpretation = JS::NullValue();
   // init.mEmma = nullptr;
 
-  RefPtr<SpeechRecognitionEvent> event =
-    SpeechRecognitionEvent::Constructor(this, NS_LITERAL_STRING("result"), init);
+  RefPtr<SpeechRecognitionEvent> event = SpeechRecognitionEvent::Constructor(
+    this, NS_LITERAL_STRING("result"), init);
   event->SetTrusted(true);
 
   bool defaultActionEnabled;
   this->DispatchEvent(event, &defaultActionEnabled);
 }
 
 void
 SpeechRecognition::DoNothing(SpeechEvent* aEvent)
@@ -616,24 +626,26 @@ SpeechRecognition::Observe(nsISupports* 
              !strcmp(aTopic, SPEECH_RECOGNITION_TEST_EVENT_REQUEST_TOPIC)) {
     ProcessTestEventRequest(aSubject, nsDependentString(aData));
   }
 
   return NS_OK;
 }
 
 void
-SpeechRecognition::ProcessTestEventRequest(nsISupports* aSubject, const nsAString& aEventName)
+SpeechRecognition::ProcessTestEventRequest(nsISupports* aSubject,
+                                           const nsAString& aEventName)
 {
   if (aEventName.EqualsLiteral("EVENT_ABORT")) {
     Abort();
   } else if (aEventName.EqualsLiteral("EVENT_AUDIO_ERROR")) {
-    DispatchError(SpeechRecognition::EVENT_AUDIO_ERROR,
-                  SpeechRecognitionErrorCode::Audio_capture, // TODO different codes?
-                  NS_LITERAL_STRING("AUDIO_ERROR test event"));
+    DispatchError(
+      SpeechRecognition::EVENT_AUDIO_ERROR,
+      SpeechRecognitionErrorCode::Audio_capture, // TODO different codes?
+      NS_LITERAL_STRING("AUDIO_ERROR test event"));
   } else {
     NS_ASSERTION(MediaPrefs::WebSpeechFakeRecognitionService(),
                  "Got request for fake recognition service event, but "
                  TEST_PREFERENCE_FAKE_RECOGNITION_SERVICE " is unset");
 
     // let the fake recognition service handle the request
   }
 }
@@ -812,17 +824,18 @@ SpeechRecognition::ValidateAndSetGrammar
     return false;
   }
 
   for (uint32_t count = 0; count < grammarListLength; ++count) {
     RefPtr<SpeechGrammar> speechGrammar = mSpeechGrammarList->Item(count, aRv);
     if (aRv.Failed()) {
       return false;
     }
-    if (NS_FAILED(mRecognitionService->ValidateAndSetGrammarList(speechGrammar.get(), nullptr))) {
+    if (NS_FAILED(mRecognitionService->ValidateAndSetGrammarList(
+          speechGrammar.get(), nullptr))) {
       aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
       return false;
     }
   }
 
   return true;
 }
 
@@ -930,17 +943,18 @@ SpeechRecognition::CreateAudioSegment(ns
   }
 
   return segment;
 }
 
 void
 SpeechRecognition::FeedAudioData(already_AddRefed<SharedBuffer> aSamples,
                                  uint32_t aDuration,
-                                 MediaStreamListener* aProvider, TrackRate aTrackRate)
+                                 MediaStreamListener* aProvider,
+                                 TrackRate aTrackRate)
 {
   NS_ASSERTION(!NS_IsMainThread(),
                "FeedAudioData should not be called in the main thread");
 
   // Endpointer expects to receive samples in chunks whose size is a
   // multiple of its frame size.
   // Since we can't assume we will receive the frames in appropriate-sized
   // chunks, we must buffer and split them in chunks of mAudioSamplesPerChunk
@@ -1030,30 +1044,32 @@ SpeechEvent::~SpeechEvent()
 
 NS_IMETHODIMP
 SpeechEvent::Run()
 {
   mRecognition->ProcessEvent(this);
   return NS_OK;
 }
 
-NS_IMPL_ISUPPORTS(SpeechRecognition::GetUserMediaSuccessCallback, nsIDOMGetUserMediaSuccessCallback)
+NS_IMPL_ISUPPORTS(SpeechRecognition::GetUserMediaSuccessCallback,
+                  nsIDOMGetUserMediaSuccessCallback)
 
 NS_IMETHODIMP
 SpeechRecognition::GetUserMediaSuccessCallback::OnSuccess(nsISupports* aStream)
 {
   RefPtr<DOMMediaStream> stream = do_QueryObject(aStream);
   if (!stream) {
     return NS_ERROR_NO_INTERFACE;
   }
   mRecognition->StartRecording(stream);
   return NS_OK;
 }
 
-NS_IMPL_ISUPPORTS(SpeechRecognition::GetUserMediaErrorCallback, nsIDOMGetUserMediaErrorCallback)
+NS_IMPL_ISUPPORTS(SpeechRecognition::GetUserMediaErrorCallback,
+                  nsIDOMGetUserMediaErrorCallback)
 
 NS_IMETHODIMP
 SpeechRecognition::GetUserMediaErrorCallback::OnError(nsISupports* aError)
 {
   RefPtr<MediaStreamError> error = do_QueryObject(aError);
   if (!error) {
     return NS_OK;
   }