Bug 1350837. P2 - change the type of mAmpleAudioThresholdUsecs to TimeUnit. draft
author JW Wang <jwwang@mozilla.com>
Tue, 28 Mar 2017 14:27:26 +0800
changeset 553391 040167eece526351f2aa1b9fdebb13396d7d85ae
parent 553390 5753d0c43a6ec4f0a2025190f5d6e06bd14048ff
child 553392 4987e4d66e347043f425e462e9b9a2988e00fd0c
push id 51626
push user jwwang@mozilla.com
push date Thu, 30 Mar 2017 00:55:27 +0000
bugs 1350837
milestone 55.0a1
Bug 1350837. P2 - change the type of mAmpleAudioThresholdUsecs to TimeUnit. MozReview-Commit-ID: 2de8DrlemaT
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
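The patch below replaces a raw int64_t microsecond field with media::TimeUnit so that comparisons, min/max and scaling carry their unit with the value instead of relying on every call site remembering that the integer means microseconds. The following is a minimal, self-contained sketch of that idea only; it is not the real mozilla::media::TimeUnit (which lives in dom/media/TimeUnits.h), and the THRESHOLD_FACTOR value is assumed for illustration since it is not shown in this diff.

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Minimal stand-in for mozilla::media::TimeUnit, mirroring only the
// operations the patch relies on: FromMicroseconds/ToMicroseconds,
// ordering (so std::min/std::max work), and scaling by an integer factor.
class TimeUnit {
public:
  static TimeUnit FromMicroseconds(int64_t aUs) { return TimeUnit(aUs); }
  int64_t ToMicroseconds() const { return mUs; }
  bool operator<(const TimeUnit& aOther) const { return mUs < aOther.mUs; }
  TimeUnit operator*(int aFactor) const { return TimeUnit(mUs * aFactor); }
private:
  explicit TimeUnit(int64_t aUs) : mUs(aUs) {}
  int64_t mUs;
};

int main() {
  // A raw int64_t carries no unit; a TimeUnit keeps the unit with the
  // value, so min/max and scaling stay in one domain instead of mixing
  // ToMicroseconds() conversions into the arithmetic.
  const int kFactor = 2; // stand-in for THRESHOLD_FACTOR (value assumed)
  TimeUnit low = TimeUnit::FromMicroseconds(300000);       // 0.3 s
  TimeUnit ample = TimeUnit::FromMicroseconds(2000000);    // 2.0 s
  TimeUnit adjusted = TimeUnit::FromMicroseconds(1200000); // slow decode

  low = std::min(adjusted, ample);        // never exceeds the ample threshold
  ample = std::max(low * kFactor, ample); // only ever grows

  std::printf("low=%lld us ample=%lld us\n",
              static_cast<long long>(low.ToMicroseconds()),
              static_cast<long long>(ample.ToMicroseconds()));
  return 0;
}
```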
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -814,28 +814,29 @@ private:
       return;
     }
 
     TimeDuration decodeTime = TimeStamp::Now() - aDecodeStart;
     int64_t adjustedTime = THRESHOLD_FACTOR * DurationToUsecs(decodeTime);
     if (adjustedTime > mMaster->mLowAudioThreshold.ToMicroseconds()
         && !mMaster->HasLowBufferedData())
     {
-      mMaster->mLowAudioThreshold = TimeUnit::FromMicroseconds(
-        std::min(adjustedTime, mMaster->mAmpleAudioThresholdUsecs));
-
-      mMaster->mAmpleAudioThresholdUsecs =
-        std::max(THRESHOLD_FACTOR * mMaster->mLowAudioThreshold.ToMicroseconds(),
-                 mMaster->mAmpleAudioThresholdUsecs);
+      mMaster->mLowAudioThreshold = std::min(
+        TimeUnit::FromMicroseconds(adjustedTime),
+        mMaster->mAmpleAudioThresholdUsecs);
+
+      mMaster->mAmpleAudioThresholdUsecs = std::max(
+        mMaster->mLowAudioThreshold * THRESHOLD_FACTOR,
+        mMaster->mAmpleAudioThresholdUsecs);
 
       SLOG("Slow video decode, set "
            "mLowAudioThresholdUsecs=%" PRId64
            " mAmpleAudioThresholdUsecs=%" PRId64,
            mMaster->mLowAudioThreshold.ToMicroseconds(),
-           mMaster->mAmpleAudioThresholdUsecs);
+           mMaster->mAmpleAudioThresholdUsecs.ToMicroseconds());
     }
   }
 
   bool DonePrerollingAudio()
   {
     return !mMaster->IsAudioDecoding()
            || mMaster->GetDecodedAudioDuration()
               >= mMaster->AudioPrerollUsecs() * mMaster->mPlaybackRate;
@@ -2592,17 +2593,17 @@ MediaDecoderStateMachine::MediaDecoderSt
   mCurrentFrameID(0),
   INIT_WATCHABLE(mObservedDuration, TimeUnit()),
   mFragmentEndTime(-1),
   mReader(new MediaDecoderReaderWrapper(mTaskQueue, aReader)),
   mDecodedAudioEndTime(0),
   mDecodedVideoEndTime(0),
   mPlaybackRate(1.0),
   mLowAudioThreshold(detail::LOW_AUDIO_THRESHOLD),
-  mAmpleAudioThresholdUsecs(detail::AMPLE_AUDIO_THRESHOLD.ToMicroseconds()),
+  mAmpleAudioThresholdUsecs(detail::AMPLE_AUDIO_THRESHOLD),
   mAudioCaptured(false),
   mMinimizePreroll(aDecoder->GetMinimizePreroll()),
   mSentLoadedMetadataEvent(false),
   mSentFirstFrameLoadedEvent(false),
   mVideoDecodeSuspended(false),
   mVideoDecodeSuspendTimer(mTaskQueue),
   mOutputStreamManager(new OutputStreamManager()),
   mResource(aDecoder->GetResource()),
@@ -2750,17 +2751,17 @@ MediaDecoderStateMachine::GetDecodedAudi
   // MediaSink not started. All audio samples are in the queue.
   return AudioQueue().Duration();
 }
 
 bool
 MediaDecoderStateMachine::HaveEnoughDecodedAudio()
 {
   MOZ_ASSERT(OnTaskQueue());
-  auto ampleAudioUSecs = mAmpleAudioThresholdUsecs * mPlaybackRate;
+  auto ampleAudioUSecs = mAmpleAudioThresholdUsecs.ToMicroseconds() * mPlaybackRate;
   return AudioQueue().GetSize() > 0
          && GetDecodedAudioDuration() >= ampleAudioUSecs;
 }
 
 bool MediaDecoderStateMachine::HaveEnoughDecodedVideo()
 {
   MOZ_ASSERT(OnTaskQueue());
   return VideoQueue().GetSize() >= GetAmpleVideoFrames() * mPlaybackRate + 1;
@@ -3784,18 +3785,18 @@ MediaDecoderStateMachine::SetAudioCaptur
 
   // Restore playback parameters.
   mMediaSink->SetPlaybackParams(params);
 
   mAudioCaptured = aCaptured;
 
   // Don't buffer as much when audio is captured because we don't need to worry
   // about high latency audio devices.
-  mAmpleAudioThresholdUsecs =
-    (mAudioCaptured ? detail::AMPLE_AUDIO_THRESHOLD / 2 : detail::AMPLE_AUDIO_THRESHOLD).ToMicroseconds();
+  mAmpleAudioThresholdUsecs = mAudioCaptured
+    ? detail::AMPLE_AUDIO_THRESHOLD / 2 : detail::AMPLE_AUDIO_THRESHOLD;
 
   mStateObj->HandleAudioCaptured();
 }
 
 uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const
 {
   MOZ_ASSERT(OnTaskQueue());
   return (mReader->IsAsync() && mReader->VideoIsHardwareAccelerated())
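The slow-decode branch in the first hunk maintains two properties: mLowAudioThreshold never exceeds the ample threshold, and the ample threshold only ever grows. Below is a hedged sketch of that update rule using plain int64_t microseconds; the struct, function and parameter names are illustrative placeholders, not the actual state machine members.

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Illustrative only: placeholder names, not the real state machine members.
struct Thresholds {
  int64_t mLowUs;   // resume decoding below this much buffered audio
  int64_t mAmpleUs; // stop decoding once this much audio is buffered
};

void AdjustForSlowDecode(Thresholds& aT, int64_t aAdjustedUs, int aFactor) {
  // Raise the low-water mark toward the observed decode cost, but never
  // above the ample threshold, else decoding would never resume.
  aT.mLowUs = std::min(aAdjustedUs, aT.mAmpleUs);
  // Keep the ample threshold at least aFactor times the low threshold,
  // and never let it shrink.
  aT.mAmpleUs = std::max(aT.mLowUs * aFactor, aT.mAmpleUs);
}

int main() {
  Thresholds t{300000, 2000000};      // 0.3 s low, 2.0 s ample
  AdjustForSlowDecode(t, 1500000, 2); // observed decode cost of 1.5 s
  std::printf("low=%lld ample=%lld\n",
              static_cast<long long>(t.mLowUs),
              static_cast<long long>(t.mAmpleUs));
  return 0;
}
```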
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -586,28 +586,28 @@ private:
   media::TimeUnit mLowAudioThreshold;
 
   // Our "ample" audio threshold. Once we've this much audio decoded, we
   // pause decoding. If we increase mLowAudioThreshold, we'll also
   // increase this too appropriately (we don't want mLowAudioThreshold
   // to be greater than ampleAudioThreshold, else we'd stop decoding!).
   // Note that we don't ever reset this threshold, it only ever grows as
   // we detect that the decode can't keep up with rendering.
-  int64_t mAmpleAudioThresholdUsecs;
+  media::TimeUnit mAmpleAudioThresholdUsecs;
 
   // At the start of decoding we want to "preroll" the decode until we've
   // got a few frames decoded before we consider whether decode is falling
   // behind. Otherwise our "we're falling behind" logic will trigger
   // unnecessarily if we start playing as soon as the first sample is
   // decoded. These two fields store how many video frames and audio
   // samples we must consume before we are considered to be finished prerolling.
   uint32_t AudioPrerollUsecs() const
   {
     MOZ_ASSERT(OnTaskQueue());
-    return mAmpleAudioThresholdUsecs / 2;
+    return mAmpleAudioThresholdUsecs.ToMicroseconds() / 2;
   }
 
   uint32_t VideoPrerollFrames() const
   {
     MOZ_ASSERT(OnTaskQueue());
     return GetAmpleVideoFrames() / 2;
   }
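AudioPrerollUsecs() above feeds DonePrerollingAudio() in the .cpp file, which compares the decoded audio duration against half of the ample threshold, scaled by the playback rate. The sketch below models that check with the state machine members flattened into parameters; the signature is illustrative, not the actual method.

```cpp
#include <cstdint>
#include <cstdio>

// Simplified model of the preroll check; parameters stand in for state
// machine members and accessors.
bool DonePrerollingAudio(bool aAudioDecoding,
                         int64_t aDecodedAudioUs,
                         int64_t aAmpleThresholdUs,
                         double aPlaybackRate) {
  // AudioPrerollUsecs() is half of the ample threshold, in microseconds.
  const int64_t prerollUs = aAmpleThresholdUs / 2;
  // Prerolling is done once there is nothing left to decode, or enough
  // audio is buffered for the current playback rate.
  return !aAudioDecoding ||
         aDecodedAudioUs >= static_cast<int64_t>(prerollUs * aPlaybackRate);
}

int main() {
  // 2.0 s ample threshold at 1x playback => preroll until 1.0 s is decoded.
  std::printf("%d\n", DonePrerollingAudio(true, 1200000, 2000000, 1.0));
  return 0;
}
```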