Bug 1323931. Part 5 - remove mSeekedAudioData and mSeekedVideoData. r?kaku draft
author JW Wang <jwwang@mozilla.com>
Fri, 16 Dec 2016 16:01:57 +0800
changeset 450325 87d6386577b5f11b9234c8264832f79a8e24f917
parent 450324 d5aaf086df264f02e5ea6536a1446d06e69fb10e
child 450326 1de474cbf974b38f9ac5420dad78ec72ae48a06b
push id 38830
push user jwwang@mozilla.com
push date Fri, 16 Dec 2016 10:01:24 +0000
reviewers kaku
bugs 1323931
milestone 53.0a1
Bug 1323931. Part 5 - remove mSeekedAudioData and mSeekedVideoData. r?kaku

MozReview-Commit-ID: Ezd4J30EooO
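In short, this part makes the accurate-seeking state push decoded samples straight into the master's media queues via mMaster->Push() as soon as they are accepted, instead of staging them in mSeekedAudioData/mSeekedVideoData and forwarding them later in OnSeekTaskResolved(). The "first sample after seeking" assertions now check the queue sizes, and the fast-seek time calculation peeks the queue fronts instead of reading the removed members. Below is a minimal, self-contained sketch of the pattern; the types and names are hypothetical stand-ins, not the real MediaDecoderStateMachine classes.

// Hypothetical sketch: push decoded samples directly into the shared queue
// owned by the "master" object, rather than staging them in a member of the
// seeking state and forwarding them when the seek resolves.
#include <cstdio>
#include <memory>
#include <queue>

struct Sample {
  long mTime = 0;  // presentation time in microseconds (toy field)
};

struct Master {
  std::queue<std::shared_ptr<Sample>> mAudioQueue;
  void Push(std::shared_ptr<Sample> aSample) { mAudioQueue.push(std::move(aSample)); }
};

class SeekingState {
public:
  explicit SeekingState(Master* aMaster) : mMaster(aMaster) {}

  // After the refactor there is no mSeekedAudioData member: the first sample
  // we decide to keep after the seek goes straight into the master's queue.
  void OnAudioDecoded(std::shared_ptr<Sample> aAudio) {
    // An empty queue is now the "1st sample after seeking" invariant.
    if (!mMaster->mAudioQueue.empty()) {
      std::fprintf(stderr, "Should be the 1st sample after seeking\n");
    }
    mMaster->Push(std::move(aAudio));
    mDoneAudioSeeking = true;
  }

private:
  Master* mMaster;
  bool mDoneAudioSeeking = false;
};

int main() {
  Master master;
  SeekingState state(&master);
  state.OnAudioDecoded(std::make_shared<Sample>());
  std::printf("audio queue size after seek: %zu\n", master.mAudioQueue.size());
  return 0;
}

The design choice is that the queue itself becomes the staging area, so there is no separate hand-off step when the seek completes.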
dom/media/MediaDecoderStateMachine.cpp
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -235,18 +235,18 @@ protected:
   bool IsExpectingMoreData() const
   {
     // We are expecting more data if either the resource states so, or if we
     // have a waiting promise pending (such as with non-MSE EME).
     return Resource()->IsExpectingMoreData() ||
            (Reader()->IsWaitForDataSupported() &&
             (Reader()->IsWaitingAudioData() || Reader()->IsWaitingVideoData()));
   }
-  MediaQueue<MediaData>& AudioQueue() { return mMaster->mAudioQueue; }
-  MediaQueue<MediaData>& VideoQueue() { return mMaster->mVideoQueue; }
+  MediaQueue<MediaData>& AudioQueue() const { return mMaster->mAudioQueue; }
+  MediaQueue<MediaData>& VideoQueue() const { return mMaster->mVideoQueue; }
 
   // Note this function will delete the current state object.
   // Don't access members to avoid UAF after this call.
   template <class S, typename... Ts>
   auto SetState(Ts... aArgs)
     -> decltype(ReturnTypeHelper(&S::Enter))
   {
     // keep mMaster in a local object because mMaster will become invalid after
@@ -893,25 +893,25 @@ public:
   {
     MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, "Seek shouldn't be finished");
     MOZ_ASSERT(aAudio);
 
     // Video-only seek doesn't reset audio decoder. There might be pending audio
     // requests when AccurateSeekTask::Seek() begins. We will just store the data
     // without checking |mDiscontinuity| or calling DropAudioUpToSeekTarget().
     if (mSeekJob.mTarget.IsVideoOnly()) {
-      mSeekedAudioData = aAudio;
+      mMaster->Push(aAudio);
       return;
     }
 
     AdjustFastSeekIfNeeded(aAudio);
 
     if (mSeekJob.mTarget.IsFast()) {
       // Non-precise seek; we can stop the seek at the first sample.
-      mSeekedAudioData = aAudio;
+      mMaster->Push(aAudio);
       mDoneAudioSeeking = true;
     } else {
       nsresult rv = DropAudioUpToSeekTarget(aAudio->As<AudioData>());
       if (NS_FAILED(rv)) {
         OnSeekTaskRejected(rv);
         return;
       }
     }
@@ -927,17 +927,17 @@ public:
   {
     MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, "Seek shouldn't be finished");
     MOZ_ASSERT(aVideo);
 
     AdjustFastSeekIfNeeded(aVideo);
 
     if (mSeekJob.mTarget.IsFast()) {
       // Non-precise seek. We can stop the seek at the first sample.
-      mSeekedVideoData = aVideo;
+      mMaster->Push(aVideo);
       mDoneVideoSeeking = true;
     } else {
       nsresult rv = DropVideoUpToSeekTarget(aVideo);
       if (NS_FAILED(rv)) {
         OnSeekTaskRejected(rv);
         return;
       }
     }
@@ -979,17 +979,17 @@ public:
         AudioQueue().Finish();
         mDoneAudioSeeking = true;
       } else {
         VideoQueue().Finish();
         mDoneVideoSeeking = true;
         if (mFirstVideoFrameAfterSeek) {
-          // Hit the end of stream. Move mFirstVideoFrameAfterSeek into
-          // mSeekedVideoData so we have something to display after seeking.
-          mSeekedVideoData = mFirstVideoFrameAfterSeek.forget();
+          // Hit the end of stream. Push mFirstVideoFrameAfterSeek to the
+          // video queue so we have something to display after seeking.
+          mMaster->Push(mFirstVideoFrameAfterSeek);
         }
       }
       MaybeFinishSeek();
       return;
     }
 
     // This is a decode error, delegate to the generic error path.
     OnSeekTaskRejected(aError);
@@ -1058,24 +1058,26 @@ private:
     // the real decoded samples' start time.
     if (mSeekJob.mTarget.IsAccurate()) {
       return seekTime;
     }
 
     // For a fast seek, we update newCurrentTime from the decoded audio and
     // video samples, setting it to whichever is closest to the seekTime.
     if (mSeekJob.mTarget.IsFast()) {
+      RefPtr<MediaData> audio = AudioQueue().PeekFront();
+      RefPtr<MediaData> video = VideoQueue().PeekFront();
 
       // A situation where both audio and video approach the end.
-      if (!mSeekedAudioData && !mSeekedVideoData) {
+      if (!audio && !video) {
         return seekTime;
       }
 
-      const int64_t audioStart = mSeekedAudioData ? mSeekedAudioData->mTime : INT64_MAX;
-      const int64_t videoStart = mSeekedVideoData ? mSeekedVideoData->mTime : INT64_MAX;
+      const int64_t audioStart = audio ? audio->mTime : INT64_MAX;
+      const int64_t videoStart = video ? video->mTime : INT64_MAX;
       const int64_t audioGap = std::abs(audioStart - seekTime);
       const int64_t videoGap = std::abs(videoStart - seekTime);
       return audioGap <= videoGap ? audioStart : videoStart;
     }
 
     MOZ_ASSERT(false, "AccurateSeekTask doesn't handle other seek types.");
     return 0;
   }
@@ -1150,17 +1152,17 @@ private:
       // The seek target doesn't lie in the audio block just after the last
       // audio frames we've seen which were before the seek target. This
       // could have been the first audio data we've seen after seek, i.e. the
       // seek terminated after the seek target in the audio stream. Just
       // abort the audio decode-to-target, the state machine will play
       // silence to cover the gap. Typically this happens in poorly muxed
       // files.
       SWARN("Audio not synced after seek, maybe a poorly muxed file?");
-      mSeekedAudioData = aAudio;
+      mMaster->Push(aAudio);
       mDoneAudioSeeking = true;
       return NS_OK;
     }
 
     // The seek target lies somewhere in this AudioData's frames, strip off
     // any frames which lie before the seek target, so we'll begin playback
     // exactly at the seek target.
     NS_ASSERTION(mSeekJob.mTarget.GetTime().ToMicroseconds() >= aAudio->mTime,
@@ -1195,18 +1197,18 @@ private:
     }
     RefPtr<AudioData> data(new AudioData(aAudio->mOffset,
                                          mSeekJob.mTarget.GetTime().ToMicroseconds(),
                                          duration.value(),
                                          frames,
                                          Move(audioData),
                                          channels,
                                          aAudio->mRate));
-    MOZ_ASSERT(!mSeekedAudioData, "Should be the 1st sample after seeking");
-    mSeekedAudioData = data.forget();
+    MOZ_ASSERT(AudioQueue().GetSize() == 0, "Should be the 1st sample after seeking");
+    mMaster->Push(data);
     mDoneAudioSeeking = true;
 
     return NS_OK;
   }
 
   nsresult DropVideoUpToSeekTarget(MediaData* aSample)
   {
     RefPtr<VideoData> video(aSample->As<VideoData>());
@@ -1229,41 +1231,33 @@ private:
         RefPtr<VideoData> temp = VideoData::ShallowCopyUpdateTimestamp(video.get(), target);
         video = temp;
       }
       mFirstVideoFrameAfterSeek = nullptr;
 
       SLOG("DropVideoUpToSeekTarget() found video frame [%lld, %lld] containing target=%lld",
                   video->mTime, video->GetEndTime(), target);
 
-      MOZ_ASSERT(!mSeekedVideoData, "Should be the 1st sample after seeking");
-      mSeekedVideoData = video;
+      MOZ_ASSERT(VideoQueue().GetSize() == 0, "Should be the 1st sample after seeking");
+      mMaster->Push(video);
       mDoneVideoSeeking = true;
     }
 
     return NS_OK;
   }
 
   void MaybeFinishSeek()
   {
     if (mDoneAudioSeeking && mDoneVideoSeeking) {
       OnSeekTaskResolved();
     }
   }
 
   void OnSeekTaskResolved()
   {
-    if (mSeekedAudioData) {
-      mMaster->Push(mSeekedAudioData);
-    }
-
-    if (mSeekedVideoData) {
-      mMaster->Push(mSeekedVideoData);
-    }
-
     SeekCompleted();
   }
 
   void OnSeekTaskRejected(const MediaResult& aError)
   {
     mMaster->DecodeError(aError);
   }
 
@@ -1279,22 +1273,16 @@ private:
   bool mDoneAudioSeeking = false;
   bool mDoneVideoSeeking = false;
 
   // This temporarily stores the first frame we decode after we seek.
   // This is so that if we hit end of stream while we're decoding to reach
   // the seek target, we will still have a frame that we can display as the
   // last frame in the media.
   RefPtr<MediaData> mFirstVideoFrameAfterSeek;
-
-  /*
-   * Information which are going to be returned to MDSM.
-   */
-  RefPtr<MediaData> mSeekedAudioData;
-  RefPtr<MediaData> mSeekedVideoData;
 };
 
 class MediaDecoderStateMachine::NextFrameSeekingState
   : public MediaDecoderStateMachine::SeekingState
 {
 public:
   explicit NextFrameSeekingState(Master* aPtr) : SeekingState(aPtr)
   {