Bug 1353607. P5 - change the type of mDecodedAudioEndTime/mDecodedVideoEndTime to TimeUnit. draft
author: JW Wang <jwwang@mozilla.com>
Tue, 28 Mar 2017 17:20:17 +0800
changeset 557652 9b6bde5c7577dbbe8f2eea31180c771854b8c277
parent 557651 ea05e191918813dfe0d722d9a664f8f71e267762
child 557653 9583ba8562fcfc6401cf568a86ef2ea65bbb71cf
push id: 52775
push user: jwwang@mozilla.com
push date: Fri, 07 Apr 2017 03:28:07 +0000
bugs: 1353607
milestone: 55.0a1
Bug 1353607. P5 - change the type of mDecodedAudioEndTime/mDecodedVideoEndTime to TimeUnit. MozReview-Commit-ID: 2bVQD5BViU4
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -1887,18 +1887,17 @@ public:
   {
     // Do nothing since no decoding is going on.
   }
 
   void HandleResumeVideoDecoding(const TimeUnit&) override
   {
     // Resume the video decoder and seek to the last video frame.
     // This triggers a video-only seek which won't update the playback position.
-    StateObject::HandleResumeVideoDecoding(
-      TimeUnit::FromMicroseconds(mMaster->mDecodedVideoEndTime));
+    StateObject::HandleResumeVideoDecoding(mMaster->mDecodedVideoEndTime);
   }
 
   void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override
   {
     if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) {
       // Schedule Step() to check if we can start playback.
       mMaster->ScheduleStateMachine();
     }
@@ -2318,17 +2317,17 @@ DecodingState::NeedToSkipToNextKeyframe(
   // readers that are async, as since their audio decode runs on a different
   // task queue it should never run low and skipping won't help their decode.
   bool isLowOnDecodedAudio =
     !Reader()->IsAsync()
     && mMaster->IsAudioDecoding()
     && (mMaster->GetDecodedAudioDuration().ToMicroseconds()
         < mMaster->mLowAudioThreshold.ToMicroseconds() * mMaster->mPlaybackRate);
   bool isLowOnDecodedVideo =
-    (mMaster->GetClock().ToMicroseconds() - mMaster->mDecodedVideoEndTime)
+    (mMaster->GetClock() - mMaster->mDecodedVideoEndTime).ToMicroseconds()
     * mMaster->mPlaybackRate
     > LOW_VIDEO_THRESHOLD.ToMicroseconds();
   bool lowBuffered = mMaster->HasLowBufferedData();
 
   if ((isLowOnDecodedAudio || isLowOnDecodedVideo) && !lowBuffered) {
     SLOG("Skipping video decode to the next keyframe lowAudio=%d lowVideo=%d "
          "lowUndecoded=%d async=%d",
          isLowOnDecodedAudio, isLowOnDecodedVideo, lowBuffered,
@@ -2607,18 +2606,16 @@ MediaDecoderStateMachine::MediaDecoderSt
   mTaskQueue(new TaskQueue(GetMediaThreadPool(MediaThreadType::PLAYBACK),
                            /* aSupportsTailDispatch = */ true)),
   mWatchManager(this, mTaskQueue),
   mDispatchedStateMachine(false),
   mDelayedScheduler(mTaskQueue),
   mCurrentFrameID(0),
   INIT_WATCHABLE(mObservedDuration, TimeUnit()),
   mReader(new MediaDecoderReaderWrapper(mTaskQueue, aReader)),
-  mDecodedAudioEndTime(0),
-  mDecodedVideoEndTime(0),
   mPlaybackRate(1.0),
   mLowAudioThreshold(detail::LOW_AUDIO_THRESHOLD),
   mAmpleAudioThreshold(detail::AMPLE_AUDIO_THRESHOLD),
   mAudioCaptured(false),
   mMinimizePreroll(aDecoder->GetMinimizePreroll()),
   mSentLoadedMetadataEvent(false),
   mSentFirstFrameLoadedEvent(false),
   mVideoDecodeSuspended(false),
@@ -2759,19 +2756,17 @@ MediaDecoderStateMachine::CreateMediaSin
 TimeUnit
 MediaDecoderStateMachine::GetDecodedAudioDuration()
 {
   MOZ_ASSERT(OnTaskQueue());
   if (mMediaSink->IsStarted()) {
     // mDecodedAudioEndTime might be smaller than GetClock() when there is
     // overlap between 2 adjacent audio samples or when we are playing
     // a chained ogg file.
-    auto t = std::max<int64_t>(
-      mDecodedAudioEndTime - GetClock().ToMicroseconds(), 0);
-    return TimeUnit::FromMicroseconds(t);
+    return std::max(mDecodedAudioEndTime - GetClock(), TimeUnit::Zero());
   }
   // MediaSink not started. All audio samples are in the queue.
   return TimeUnit::FromMicroseconds(AudioQueue().Duration());
 }
 
 bool
 MediaDecoderStateMachine::HaveEnoughDecodedAudio()
 {
@@ -3165,18 +3160,18 @@ MediaDecoderStateMachine::RequestAudioDa
 
   RefPtr<MediaDecoderStateMachine> self = this;
   mReader->RequestAudioData()->Then(
     OwnerThread(), __func__,
     [this, self] (AudioData* aAudio) {
       MOZ_ASSERT(aAudio);
       mAudioDataRequest.Complete();
       // audio->GetEndTime() is not always mono-increasing in chained ogg.
-      mDecodedAudioEndTime =
-        std::max(aAudio->GetEndTime(), mDecodedAudioEndTime);
+      mDecodedAudioEndTime = std::max(
+        TimeUnit::FromMicroseconds(aAudio->GetEndTime()), mDecodedAudioEndTime);
       LOGV("OnAudioDecoded [%" PRId64 ",%" PRId64 "]", aAudio->mTime,
            aAudio->GetEndTime());
       mStateObj->HandleAudioDecoded(aAudio);
     },
     [this, self] (const MediaResult& aError) {
       LOGV("OnAudioNotDecoded aError=%" PRIu32, static_cast<uint32_t>(aError.Code()));
       mAudioDataRequest.Complete();
       switch (aError.Code()) {
@@ -3211,18 +3206,18 @@ MediaDecoderStateMachine::RequestVideoDa
   TimeStamp videoDecodeStartTime = TimeStamp::Now();
   RefPtr<MediaDecoderStateMachine> self = this;
   mReader->RequestVideoData(aSkipToNextKeyframe, aCurrentTime)->Then(
     OwnerThread(), __func__,
     [this, self, videoDecodeStartTime] (VideoData* aVideo) {
       MOZ_ASSERT(aVideo);
       mVideoDataRequest.Complete();
       // Handle abnormal or negative timestamps.
-      mDecodedVideoEndTime =
-        std::max(mDecodedVideoEndTime, aVideo->GetEndTime());
+      mDecodedVideoEndTime = std::max(
+        mDecodedVideoEndTime, TimeUnit::FromMicroseconds(aVideo->GetEndTime()));
       LOGV("OnVideoDecoded [%" PRId64 ",%" PRId64 "]", aVideo->mTime,
            aVideo->GetEndTime());
       mStateObj->HandleVideoDecoded(aVideo, videoDecodeStartTime);
     },
     [this, self] (const MediaResult& aError) {
       LOGV("OnVideoNotDecoded aError=%" PRIu32 , static_cast<uint32_t>(aError.Code()));
       mVideoDataRequest.Complete();
       switch (aError.Code()) {
@@ -3356,21 +3351,19 @@ MediaDecoderStateMachine::HasLowBuffered
 
   if (mBuffered.Ref().IsInvalid()) {
     return false;
   }
 
   // We are never low in decoded data when we don't have audio/video or have
   // decoded all audio/video samples.
   TimeUnit endOfDecodedVideo = (HasVideo() && !VideoQueue().IsFinished())
-    ? TimeUnit::FromMicroseconds(mDecodedVideoEndTime)
-    : TimeUnit::FromInfinity();
+    ? mDecodedVideoEndTime : TimeUnit::FromInfinity();
   TimeUnit endOfDecodedAudio = (HasAudio() && !AudioQueue().IsFinished())
-    ? TimeUnit::FromMicroseconds(mDecodedAudioEndTime)
-    : TimeUnit::FromInfinity();
+    ? mDecodedAudioEndTime : TimeUnit::FromInfinity();
 
   auto endOfDecodedData = std::min(endOfDecodedVideo, endOfDecodedAudio);
   if (Duration() < endOfDecodedData) {
     // Our duration is not up to date. No point buffering.
     return false;
   }
 
   if (endOfDecodedData.IsInfinite()) {
@@ -3476,25 +3469,25 @@ MediaDecoderStateMachine::ResetDecode(Tr
   MOZ_ASSERT(OnTaskQueue());
   LOG("MediaDecoderStateMachine::Reset");
 
   // Assert that aTracks specifies to reset the video track because we
   // don't currently support resetting just the audio track.
   MOZ_ASSERT(aTracks.contains(TrackInfo::kVideoTrack));
 
   if (aTracks.contains(TrackInfo::kVideoTrack)) {
-    mDecodedVideoEndTime = 0;
+    mDecodedVideoEndTime = TimeUnit::Zero();
     mVideoCompleted = false;
     VideoQueue().Reset();
     mVideoDataRequest.DisconnectIfExists();
     mVideoWaitRequest.DisconnectIfExists();
   }
 
   if (aTracks.contains(TrackInfo::kAudioTrack)) {
-    mDecodedAudioEndTime = 0;
+    mDecodedAudioEndTime = TimeUnit::Zero();
     mAudioCompleted = false;
     AudioQueue().Reset();
     mAudioDataRequest.DisconnectIfExists();
     mAudioWaitRequest.DisconnectIfExists();
   }
 
   mPlaybackOffset = 0;
 
@@ -3834,17 +3827,18 @@ MediaDecoderStateMachine::GetDebugInfo()
            "mSentFirstFrameLoadedEvent=%d IsPlaying=%d mAudioStatus=%s "
            "mVideoStatus=%s mDecodedAudioEndTime=%" PRId64
            " mDecodedVideoEndTime=%" PRId64 "mAudioCompleted=%d "
            "mVideoCompleted=%d",
            GetMediaTime().ToMicroseconds(),
            mMediaSink->IsStarted() ? GetClock().ToMicroseconds() : -1,
            mMediaSink.get(), ToStateStr(), mPlayState.Ref(),
            mSentFirstFrameLoadedEvent, IsPlaying(), AudioRequestStatus(),
-           VideoRequestStatus(), mDecodedAudioEndTime, mDecodedVideoEndTime,
+           VideoRequestStatus(), mDecodedAudioEndTime.ToMicroseconds(),
+           mDecodedVideoEndTime.ToMicroseconds(),
            mAudioCompleted, mVideoCompleted)
          + mStateObj->GetDebugInfo() + nsCString("\n")
          + mMediaSink->GetDebugInfo();
 }
 
 RefPtr<MediaDecoder::DebugInfoPromise>
 MediaDecoderStateMachine::RequestDebugInfo()
 {
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -557,21 +557,21 @@ private:
   media::TimeUnit AudioEndTime() const;
 
   // The end time of the last rendered video frame that's been sent to
   // compositor.
   media::TimeUnit VideoEndTime() const;
 
   // The end time of the last decoded audio frame. This signifies the end of
   // decoded audio data. Used to check if we are low in decoded data.
-  int64_t mDecodedAudioEndTime;
+  media::TimeUnit mDecodedAudioEndTime;
 
   // The end time of the last decoded video frame. Used to check if we are low
   // on decoded video data.
-  int64_t mDecodedVideoEndTime;
+  media::TimeUnit mDecodedVideoEndTime;
 
   // Playback rate. 1.0 : normal speed, 0.5 : two times slower.
   double mPlaybackRate;
 
   // If we've got more than this number of decoded video frames waiting in
   // the video queue, we will not decode any more video frames until some have
   // been consumed by the play state machine thread.
   // Must hold monitor.