Bug 1201363 - Call MediaStreamVideoSink::setCurrentFrames in SourceMediaStream::AppendToTrack. r?roc draft
author ctai <ctai@mozilla.com>
Mon, 18 Jan 2016 17:27:13 +0800
changeset 324270 3166a49869a153c081b9afa72fe987f8f4999d4f
parent 324269 df281ffafa7b113a7a49949d29df756a88711d5d
child 324271 d911ce72fa23e45e24509ca508cb0c17967ef1de
push id 9869
push user bmo:ctai@mozilla.com
push date Fri, 22 Jan 2016 07:09:26 +0000
reviewers roc
bugs 1201363
milestone 46.0a1
Bug 1201363 - Call MediaStreamVideoSink::setCurrentFrames in SourceMediaStream::AppendToTrack. r?roc In this patch, we first deal with the MediaElement case. We replace |PlayVideo| with |VideoFrameContainer::SetCurrentFrames| in |SourceMediaStream::AppendToTrack|. The MSG uses TimeStamp::Now() as the TimeStamp of each video frame in most cases, except the MediaElement case. Because the MediaElement has its own VideoQueue, we need to calculate the correct TimeStamp from the start TimeStamp of this MediaStream plus the elapsed time of the video frame in DecodedStream.
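
As a minimal sketch of the TimeStamp derivation described above (illustrative only, not part of the patch; |sourceStream| and |frameEndUs| are placeholder names, and the getter is the one this patch adds to SourceMediaStream):

// DecodedStream/MediaElement case: a frame's wall-clock TimeStamp is the
// stream's start TimeStamp plus the frame's elapsed media time.
TimeStamp streamStart = sourceStream->GetStreamTracksStartTimeStamp();
int64_t frameEndUs = 33333; // e.g. frame end time in microseconds
TimeStamp frameTime =
    streamStart + TimeDuration::FromMicroseconds(frameEndUs);
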
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamGraph.h
dom/media/MediaStreamGraphImpl.h
dom/media/VideoSegment.cpp
dom/media/VideoSegment.h
dom/media/mediasink/DecodedStream.cpp
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -83,16 +83,24 @@ MediaStreamGraphImpl::FinishStream(Media
 
   SetStreamOrderDirty();
 }
 
 void
 MediaStreamGraphImpl::AddStreamGraphThread(MediaStream* aStream)
 {
   aStream->mTracksStartTime = mProcessedTime;
+  if (aStream->AsSourceStream()) {
+    SourceMediaStream* source = aStream->AsSourceStream();
+    TimeStamp currentTimeStamp = CurrentDriver()->GetCurrentTimeStamp();
+    TimeStamp processedTimeStamp = currentTimeStamp +
+      TimeDuration::FromSeconds(MediaTimeToSeconds(mProcessedTime - IterationEnd()));
+    source->SetStreamTracksStartTimeStamp(processedTimeStamp);
+  }
+
   if (aStream->IsSuspended()) {
     mSuspendedStreams.AppendElement(aStream);
     STREAM_LOG(LogLevel::Debug, ("Adding media stream %p to the graph, in the suspended stream array", aStream));
   } else {
     mStreams.AppendElement(aStream);
     STREAM_LOG(LogLevel::Debug, ("Adding media stream %p to the graph", aStream));
   }
 
@@ -396,16 +404,33 @@ MediaStreamGraphImpl::UpdateMediaStreamV
   for (size_t ix = 0; ix < existedFlags.Length(); ++ix) {
     if (!existedFlags[ix]) {
       removedVideoSinks.AppendElement(orignalVideoSinks[ix]);
     }
   }
 
   {
     MutexAutoLock lock(aStream->mMutex);
+    // Re-send missed VideoSegments to newly added video sinks. Sometimes
+    // |SourceMediaStream::AppendToTrack| is called before the
+    // MediaStreamVideoSink is actually added to the set of video sinks in
+    // SourceMediaStream, so we need to re-send the missed video frames.
+    for (MediaStreamVideoSink* newAdded : newAddedVideoSinks) {
+      for (const SourceMediaStream::TrackData& trackData : aStream->mUpdateTracks)
+      {
+        if (trackData.mData->GetType() == MediaSegment::VIDEO) {
+          newAdded->SetCurrentFrames(*(static_cast<VideoSegment*>(trackData.mData.get())));
+        }
+      }
+    }
+    // Clear the video frame when the MediaStreamVideoSink is removed.
+    for (MediaStreamVideoSink* removed : removedVideoSinks) {
+      removed->ClearFrames();
+    }
+
     aStream->mVideoSinks.Assign(videoSinks);
   }
 }
 
 namespace {
   // Value of mCycleMarker for unvisited streams in cycle detection.
   const uint32_t NOT_VISITED = UINT32_MAX;
   // Value of mCycleMarker for ordered streams in muted cycles.
@@ -842,30 +867,16 @@ MediaStreamGraphImpl::PlayAudio(MediaStr
     // Need unique id for stream & track - and we want it to match the inserter
     output.WriteTo(LATENCY_STREAM_ID(aStream, track->GetID()),
                                      mMixer, AudioChannelCount(),
                                      mSampleRate);
   }
   return ticksWritten;
 }
 
-static void
-SetImageToBlackPixel(PlanarYCbCrImage* aImage)
-{
-  uint8_t blackPixel[] = { 0x10, 0x80, 0x80 };
-
-  PlanarYCbCrData data;
-  data.mYChannel = blackPixel;
-  data.mCbChannel = blackPixel + 1;
-  data.mCrChannel = blackPixel + 2;
-  data.mYStride = data.mCbCrStride = 1;
-  data.mPicSize = data.mYSize = data.mCbCrSize = IntSize(1, 1);
-  aImage->SetData(data);
-}
-
 class VideoFrameContainerInvalidateRunnable : public nsRunnable {
 public:
   explicit VideoFrameContainerInvalidateRunnable(VideoFrameContainer* aVideoFrameContainer)
     : mVideoFrameContainer(aVideoFrameContainer)
   {}
   NS_IMETHOD Run()
   {
     MOZ_ASSERT(NS_IsMainThread());
@@ -873,170 +884,16 @@ public:
     mVideoFrameContainer->Invalidate();
 
     return NS_OK;
   }
 private:
   RefPtr<VideoFrameContainer> mVideoFrameContainer;
 };
 
-void
-MediaStreamGraphImpl::PlayVideo(MediaStream* aStream)
-{
-  MOZ_ASSERT(mRealtime, "Should only attempt to play video in realtime mode");
-
-  if (aStream->mVideoOutputs.IsEmpty())
-    return;
-
-  TimeStamp currentTimeStamp = CurrentDriver()->GetCurrentTimeStamp();
-
-  // Collect any new frames produced in this iteration.
-  nsAutoTArray<ImageContainer::NonOwningImage,4> newImages;
-  RefPtr<Image> blackImage;
-
-  MOZ_ASSERT(mProcessedTime >= aStream->mTracksStartTime, "frame position before buffer?");
-  // We only look at the non-blocking interval
-  StreamTime frameBufferTime = aStream->GraphTimeToStreamTime(mProcessedTime);
-  StreamTime bufferEndTime = aStream->GraphTimeToStreamTime(aStream->mStartBlocking);
-  StreamTime start;
-  const VideoChunk* chunk;
-  for ( ;
-       frameBufferTime < bufferEndTime;
-       frameBufferTime = start + chunk->GetDuration()) {
-    // Pick the last track that has a video chunk for the time, and
-    // schedule its frame.
-    chunk = nullptr;
-    for (StreamTracks::TrackIter tracks(aStream->GetStreamTracks(),
-                                        MediaSegment::VIDEO);
-         !tracks.IsEnded();
-         tracks.Next()) {
-      VideoSegment* segment = tracks->Get<VideoSegment>();
-      StreamTime thisStart;
-      const VideoChunk* thisChunk =
-        segment->FindChunkContaining(frameBufferTime, &thisStart);
-      if (thisChunk && thisChunk->mFrame.GetImage()) {
-        start = thisStart;
-        chunk = thisChunk;
-      }
-    }
-    if (!chunk)
-      break;
-
-    const VideoFrame* frame = &chunk->mFrame;
-    if (*frame == aStream->mLastPlayedVideoFrame) {
-      continue;
-    }
-
-    Image* image = frame->GetImage();
-    STREAM_LOG(LogLevel::Verbose,
-               ("MediaStream %p writing video frame %p (%dx%d)",
-                aStream, image, frame->GetIntrinsicSize().width,
-                frame->GetIntrinsicSize().height));
-    // Schedule this frame after the previous frame finishes, instead of at
-    // its start time.  These times only differ in the case of multiple
-    // tracks.
-    // frameBufferTime is in the non-blocking interval.
-    GraphTime frameTime = aStream->StreamTimeToGraphTime(frameBufferTime);
-    TimeStamp targetTime = currentTimeStamp +
-      TimeDuration::FromSeconds(MediaTimeToSeconds(frameTime - IterationEnd()));
-
-    if (frame->GetForceBlack()) {
-      if (!blackImage) {
-        // Fixme: PlayVideo will be replaced in latter changeset
-        // "Call MediaStreamVideoSink::setCurrentFrames in SourceMediaStream::AppendToTrack."
-        // of this bug.
-        // This is a temp workaround to pass the build and test.
-        if (!aStream->mVideoOutputs[0]->AsVideoFrameContainer()) {
-          return;
-        }
-        blackImage = aStream->mVideoOutputs[0]->AsVideoFrameContainer()->
-          GetImageContainer()->CreatePlanarYCbCrImage();
-        if (blackImage) {
-          // Sets the image to a single black pixel, which will be scaled to
-          // fill the rendered size.
-          SetImageToBlackPixel(blackImage->AsPlanarYCbCrImage());
-        }
-      }
-      if (blackImage) {
-        image = blackImage;
-      }
-    }
-    newImages.AppendElement(ImageContainer::NonOwningImage(image, targetTime));
-
-    aStream->mLastPlayedVideoFrame = *frame;
-  }
-
-  if (!aStream->mLastPlayedVideoFrame.GetImage())
-    return;
-
-  nsAutoTArray<ImageContainer::NonOwningImage,4> images;
-  bool haveMultipleImages = false;
-
-  for (MediaStreamVideoSink* sink : aStream->mVideoOutputs) {
-    VideoFrameContainer* output = sink->AsVideoFrameContainer();
-    if (!output) {
-      continue;
-    }
-
-    // Find previous frames that may still be valid.
-    nsAutoTArray<ImageContainer::OwningImage,4> previousImages;
-    output->GetImageContainer()->GetCurrentImages(&previousImages);
-    uint32_t j = previousImages.Length();
-    if (j) {
-      // Re-use the most recent frame before currentTimeStamp and subsequent,
-      // always keeping at least one frame.
-      do {
-        --j;
-      } while (j > 0 && previousImages[j].mTimeStamp > currentTimeStamp);
-    }
-    if (previousImages.Length() - j + newImages.Length() > 1) {
-      haveMultipleImages = true;
-    }
-
-    // Don't update if there are no changes.
-    if (j == 0 && newImages.IsEmpty())
-      continue;
-
-    for ( ; j < previousImages.Length(); ++j) {
-      const auto& image = previousImages[j];
-      // Cope with potential clock skew with AudioCallbackDriver.
-      if (newImages.Length() && image.mTimeStamp > newImages[0].mTimeStamp) {
-        STREAM_LOG(LogLevel::Warning,
-                   ("Dropping %u video frames due to clock skew",
-                    unsigned(previousImages.Length() - j)));
-        break;
-      }
-
-      images.AppendElement(ImageContainer::
-                           NonOwningImage(image.mImage,
-                                          image.mTimeStamp, image.mFrameID));
-    }
-
-    // Add the frames from this iteration.
-    for (auto& image : newImages) {
-      image.mFrameID = output->NewFrameID();
-      images.AppendElement(image);
-    }
-    output->SetCurrentFrames(aStream->mLastPlayedVideoFrame.GetIntrinsicSize(),
-                             images);
-
-    nsCOMPtr<nsIRunnable> event =
-      new VideoFrameContainerInvalidateRunnable(output);
-    DispatchToMainThreadAfterStreamStateUpdate(event.forget());
-
-    images.ClearAndRetainStorage();
-  }
-
-  // If the stream has finished and the timestamps of all frames have expired
-  // then no more updates are required.
-  if (aStream->mFinished && !haveMultipleImages) {
-    aStream->mLastPlayedVideoFrame.SetNull();
-  }
-}
-
 bool
 MediaStreamGraphImpl::ShouldUpdateMainThread()
 {
   if (mRealtime) {
     return true;
   }
 
   TimeStamp now = TimeStamp::Now();
@@ -1269,17 +1126,16 @@ MediaStreamGraphImpl::Process()
         StreamTime ticksPlayedForThisStream = PlayAudio(stream);
         if (!ticksPlayed) {
           ticksPlayed = ticksPlayedForThisStream;
         } else {
           MOZ_ASSERT(!ticksPlayedForThisStream || ticksPlayedForThisStream == ticksPlayed,
               "Each stream should have the same number of frame.");
         }
       }
-      PlayVideo(stream);
     }
     if (stream->mStartBlocking > mProcessedTime) {
       allBlockedForever = false;
     }
   }
 
   if (CurrentDriver()->AsAudioCallbackDriver() && ticksPlayed) {
     mMixer.FinishMixing();
@@ -2357,16 +2213,26 @@ SourceMediaStream::ResampleAudioToGraphS
       return;
     }
     aTrackData->mResampler.own(state);
     aTrackData->mResamplerChannelCount = channels;
   }
   segment->ResampleChunks(aTrackData->mResampler, aTrackData->mInputRate, GraphImpl()->GraphRate());
 }
 
+void
+SourceMediaStream::AdvanceTimeVaryingValuesToCurrentTime(GraphTime aCurrentTime,
+                                                         GraphTime aBlockedTime)
+{
+  MutexAutoLock lock(mMutex);
+  mTracksStartTime += aBlockedTime;
+  mStreamTracksStartTimeStamp +=
+    TimeDuration::FromSeconds(GraphImpl()->MediaTimeToSeconds(aBlockedTime));
+  mTracks.ForgetUpTo(aCurrentTime - mTracksStartTime);
+}
+
 bool
 SourceMediaStream::AppendToTrack(TrackID aID, MediaSegment* aSegment, MediaSegment *aRawSegment)
 {
   MutexAutoLock lock(mMutex);
   // ::EndAllTrackAndFinished() can end these before the sources notice
   bool appended = false;
   auto graph = GraphImpl();
   if (!mFinished && graph) {
@@ -2381,16 +2247,22 @@ SourceMediaStream::AppendToTrack(TrackID
       // Apply track disabling before notifying any consumers directly
       // or inserting into the graph
       ApplyTrackDisabling(aID, aSegment, aRawSegment);
 
       ResampleAudioToGraphSampleRate(track, aSegment);
 
       // Must notify first, since AppendFrom() will empty out aSegment
       NotifyDirectConsumers(track, aRawSegment ? aRawSegment : aSegment);
+      if (aSegment->GetType() == MediaSegment::VIDEO) {
+        for (MediaStreamVideoSink* sink : mVideoSinks) {
+          sink->SetCurrentFrames(static_cast<const VideoSegment&>(*aSegment));
+        }
+      }
+
       track->mData->AppendFrom(aSegment); // note: aSegment is now dead
       appended = true;
       GraphImpl()->EnsureNextIteration();
     } else {
       aSegment->Clear();
     }
   }
   return appended;
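
A hypothetical producer-side sketch of the new flow (|image|, |duration|, |intrinsicSize|, and |videoTrackId| are placeholders): frames appended through |AppendToTrack| now reach registered video sinks in the same call, instead of waiting for |PlayVideo| during the next graph iteration.

VideoSegment segment;
segment.AppendFrame(image.forget(), duration, intrinsicSize,
                    /* aForceBlack = */ false,
                    /* aTimeStamp = */ TimeStamp::Now());
// AppendToTrack invokes SetCurrentFrames(segment) on every registered
// MediaStreamVideoSink before the segment is merged into the track data.
sourceStream->AppendToTrack(videoTrackId, &segment);
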
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -558,17 +558,18 @@ public:
   void IncrementSuspendCount() { ++mSuspendedCount; }
   void DecrementSuspendCount()
   {
     NS_ASSERTION(mSuspendedCount > 0, "Suspend count underrun");
     --mSuspendedCount;
   }
 
 protected:
-  void AdvanceTimeVaryingValuesToCurrentTime(GraphTime aCurrentTime, GraphTime aBlockedTime)
+  // |AdvanceTimeVaryingValuesToCurrentTime| is overridden in SourceMediaStream.
+  virtual void AdvanceTimeVaryingValuesToCurrentTime(GraphTime aCurrentTime, GraphTime aBlockedTime)
   {
     mTracksStartTime += aBlockedTime;
     mTracks.ForgetUpTo(aCurrentTime - mTracksStartTime);
   }
 
   void NotifyMainThreadListeners()
   {
     NS_ASSERTION(NS_IsMainThread(), "Call only on main thread");
@@ -697,16 +698,17 @@ protected:
  */
 class SourceMediaStream : public MediaStream
 {
 public:
   explicit SourceMediaStream(DOMMediaStream* aWrapper) :
     MediaStream(aWrapper),
     mMutex("mozilla::media::SourceMediaStream"),
     mUpdateKnownTracksTime(0),
+    mStreamTracksStartTimeStamp(TimeStamp::Now()),
     mPullEnabled(false),
     mUpdateFinished(false),
     mNeedsMixing(false)
   {}
 
   SourceMediaStream* AsSourceStream() override { return this; }
 
   // Media graph thread only
@@ -822,16 +824,21 @@ public:
   /**
    * End all tracks and Finish() this stream.  Used to voluntarily revoke access
    * to a LocalMediaStream.
    */
   void EndAllTrackAndFinish();
 
   void RegisterForAudioMixing();
 
+  TimeStamp GetStreamTracksStartTimeStamp() {
+    MutexAutoLock lock(mMutex);
+    return mStreamTracksStartTimeStamp;
+  }
+
   // XXX need a Reset API
 
   friend class MediaStreamGraphImpl;
 
 protected:
   struct ThreadAndRunnable {
     void Init(TaskQueue* aTarget, nsIRunnable* aRunnable)
     {
@@ -891,21 +898,34 @@ protected:
    * Notify direct consumers of new data to one of the stream tracks.
    * The data doesn't have to be resampled (though it may be).  This is called
    * from AppendToTrack on the thread providing the data, and will call
    * the Listeners on this thread.
    */
   void NotifyDirectConsumers(TrackData *aTrack,
                              MediaSegment *aSegment);
 
+  virtual void
+  AdvanceTimeVaryingValuesToCurrentTime(GraphTime aCurrentTime,
+                                        GraphTime aBlockedTime) override;
+  void SetStreamTracksStartTimeStamp(const TimeStamp& aTimeStamp)
+  {
+    MutexAutoLock lock(mMutex);
+    mStreamTracksStartTimeStamp = aTimeStamp;
+  }
+
   // This must be acquired *before* MediaStreamGraphImpl's lock, if they are
   // held together.
   Mutex mMutex;
   // protected by mMutex
   StreamTime mUpdateKnownTracksTime;
+  // This TimeStamp is updated when the SourceMediaStream is added to the
+  // graph and when blocked time is accounted for, in |AddStreamGraphThread|
+  // and |AdvanceTimeVaryingValuesToCurrentTime| respectively.
+  TimeStamp mStreamTracksStartTimeStamp;
   nsTArray<TrackData> mUpdateTracks;
   nsTArray<TrackData> mPendingTracks;
   nsTArray<RefPtr<MediaStreamDirectListener> > mDirectListeners;
  // The set of MediaStreamVideoSink. It should only be updated in
   // |MediaStreamGraphImpl::UpdateMediaStreamVideoSinkSet|
   nsTArray<RefPtr<MediaStreamVideoSink>> mVideoSinks;
   bool mPullEnabled;
   bool mUpdateFinished;
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -339,20 +339,16 @@ public:
    */
   void CreateOrDestroyAudioStreams(MediaStream* aStream);
   /**
    * Queue audio (mix of stream audio and silence for blocked intervals)
    * to the audio output stream. Returns the number of frames played.
    */
   StreamTime PlayAudio(MediaStream* aStream);
   /**
-   * Set the correct current video frame for stream aStream.
-   */
-  void PlayVideo(MediaStream* aStream);
-  /**
    * No more data will be forthcoming for aStream. The stream will end
    * at the current buffer end point. The StreamTracks's tracks must be
    * explicitly set to finished by the caller.
    */
   void FinishStream(MediaStream* aStream);
   /**
    * Compute how much stream data we would like to buffer for aStream.
    */
--- a/dom/media/VideoSegment.cpp
+++ b/dom/media/VideoSegment.cpp
@@ -91,19 +91,21 @@ VideoChunk::VideoChunk()
 
 VideoChunk::~VideoChunk()
 {}
 
 void
 VideoSegment::AppendFrame(already_AddRefed<Image>&& aImage,
                           StreamTime aDuration,
                           const IntSize& aIntrinsicSize,
-                          bool aForceBlack)
+                          bool aForceBlack,
+                          TimeStamp aTimeStamp)
 {
   VideoChunk* chunk = AppendChunk(aDuration);
+  chunk->mTimeStamp = aTimeStamp;
   VideoFrame frame(aImage, aIntrinsicSize);
   frame.SetForceBlack(aForceBlack);
   chunk->mFrame.TakeFrom(&frame);
 }
 
 VideoSegment::VideoSegment()
   : MediaSegmentBase<VideoSegment, VideoChunk>(VIDEO)
 {}
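
Usage sketch for the extended |AppendFrame| (placeholder variables; existing four-argument callers are unaffected because the new parameter defaults to TimeStamp::Now()):

VideoSegment segment;
// Old-style caller: the TimeStamp defaults to TimeStamp::Now().
segment.AppendFrame(image.forget(), duration, intrinsicSize);
// DecodedStream-style caller: pass an explicit per-frame wall-clock time.
segment.AppendFrame(image2.forget(), duration, intrinsicSize,
                    /* aForceBlack = */ false,
                    streamStart + TimeDuration::FromMicroseconds(frameEndUs));
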
--- a/dom/media/VideoSegment.h
+++ b/dom/media/VideoSegment.h
@@ -111,17 +111,18 @@ public:
   typedef mozilla::gfx::IntSize IntSize;
 
   VideoSegment();
   ~VideoSegment();
 
   void AppendFrame(already_AddRefed<Image>&& aImage,
                    StreamTime aDuration,
                    const IntSize& aIntrinsicSize,
-                   bool aForceBlack = false);
+                   bool aForceBlack = false,
+                   TimeStamp aTimeStamp = TimeStamp::Now());
   const VideoFrame* GetLastFrame(StreamTime* aStart = nullptr)
   {
     VideoChunk* c = GetLastChunk();
     if (!c) {
       return nullptr;
     }
     if (aStart) {
       *aStart = mDuration - c->mDuration;
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -551,23 +551,24 @@ DecodedStream::SendAudio(double aVolume,
 }
 
 static void
 WriteVideoToMediaStream(MediaStream* aStream,
                         layers::Image* aImage,
                         int64_t aEndMicroseconds,
                         int64_t aStartMicroseconds,
                         const mozilla::gfx::IntSize& aIntrinsicSize,
+                        const TimeStamp& aTimeStamp,
                         VideoSegment* aOutput)
 {
   RefPtr<layers::Image> image = aImage;
   StreamTime duration =
       aStream->MicrosecondsToStreamTimeRoundDown(aEndMicroseconds) -
       aStream->MicrosecondsToStreamTimeRoundDown(aStartMicroseconds);
-  aOutput->AppendFrame(image.forget(), duration, aIntrinsicSize);
+  aOutput->AppendFrame(image.forget(), duration, aIntrinsicSize, false, aTimeStamp);
 }
 
 static bool
 ZeroDurationAtLastChunk(VideoSegment& aInput)
 {
   // Get the last video frame's start time in VideoSegment aInput.
  // If the start time is equal to the duration of aInput, it means the last video
   // frame's duration is zero.
@@ -603,23 +604,27 @@ DecodedStream::SendVideo(bool aIsSameOri
 
       // TODO: |mLastVideoImage| should come from the last image rendered
       // by the state machine. This will avoid the black frame when capture
      // happens in the middle of playback (especially in the middle of a
       // video frame). E.g. if we have a video frame that is 30 sec long
       // and capture happens at 15 sec, we'll have to append a black frame
       // that is 15 sec long.
       WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage, v->mTime,
-          mData->mNextVideoTime, mData->mLastVideoImageDisplaySize, &output);
+        mData->mNextVideoTime, mData->mLastVideoImageDisplaySize,
+        sourceStream->GetStreamTracksStartTimeStamp() +
+          TimeDuration::FromMicroseconds(v->mTime),
+        &output);
       mData->mNextVideoTime = v->mTime;
     }
 
     if (mData->mNextVideoTime < v->GetEndTime()) {
       WriteVideoToMediaStream(sourceStream, v->mImage,
-          v->GetEndTime(), mData->mNextVideoTime, v->mDisplay, &output);
+        v->GetEndTime(), mData->mNextVideoTime, v->mDisplay,
+        sourceStream->GetStreamTracksStartTimeStamp() +
+          TimeDuration::FromMicroseconds(v->GetEndTime()),
+        &output);
       mData->mNextVideoTime = v->GetEndTime();
       mData->mLastVideoImage = v->mImage;
       mData->mLastVideoImageDisplaySize = v->mDisplay;
     }
   }
 
   // Check the output is not empty.
   if (output.GetLastFrame()) {
@@ -635,18 +640,20 @@ DecodedStream::SendVideo(bool aIsSameOri
   }
 
   if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
     if (mData->mEOSVideoCompensation) {
       VideoSegment endSegment;
       // Calculate the deviation clock time from DecodedStream.
       int64_t deviation_usec = sourceStream->StreamTimeToMicroseconds(1);
       WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
-          mData->mNextVideoTime + deviation_usec, mData->mNextVideoTime,
-          mData->mLastVideoImageDisplaySize, &endSegment);
+        mData->mNextVideoTime + deviation_usec, mData->mNextVideoTime,
+        mData->mLastVideoImageDisplaySize,
+        sourceStream->GetStreamTracksStartTimeStamp() +
+          TimeDuration::FromMicroseconds(mData->mNextVideoTime + deviation_usec),
+        &endSegment);
       mData->mNextVideoTime += deviation_usec;
       MOZ_ASSERT(endSegment.GetDuration() > 0);
       if (!aIsSameOrigin) {
         endSegment.ReplaceWithDisabled();
       }
       sourceStream->AppendToTrack(videoTrackId, &endSegment);
     }
     sourceStream->EndTrack(videoTrackId);
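
A worked example of the EOS compensation above (the graph rate is an assumption for illustration): |StreamTimeToMicroseconds(1)| converts a single StreamTime tick to microseconds, so the last frame is extended by exactly one tick to guarantee |endSegment| has a non-zero duration before the track is ended.

// Illustrative: one StreamTime tick at a 48 kHz graph rate.
//   deviation_usec = StreamTimeToMicroseconds(1) = 1,000,000 / 48,000 ≈ 21 µs
// The final frame covers [mNextVideoTime, mNextVideoTime + deviation_usec),
// so MOZ_ASSERT(endSegment.GetDuration() > 0) holds.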