--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -14,16 +14,25 @@
#include "MediaStreamGraph.h"
#include "OutputStreamManager.h"
#include "SharedBuffer.h"
#include "VideoSegment.h"
#include "VideoUtils.h"
namespace mozilla {
+/*
+ * A container class to make it easier to pass the playback info (start time
+ * and track info) from DecodedStream::Start(), via a runnable, to the
+ * main-thread construction of DecodedStreamData in DecodedStream::CreateData().
+ */
+struct PlaybackInfoInit {
+ int64_t mStartTime; // presentation start time in microseconds (seeds mNextAudioTime/mNextVideoTime)
+ MediaInfo mInfo; // audio/video track info used to create the source stream's tracks
+};
+
class DecodedStreamGraphListener : public MediaStreamListener {
typedef MediaStreamListener::MediaStreamGraphEvent MediaStreamGraphEvent;
public:
DecodedStreamGraphListener(MediaStream* aStream,
MozPromiseHolder<GenericPromise>&& aPromise)
: mMutex("DecodedStreamGraphListener::mMutex")
, mStream(aStream)
, mLastOutputTime(aStream->StreamTimeToMicroseconds(aStream->GetCurrentTime()))
@@ -103,16 +112,17 @@ UpdateStreamSuspended(MediaStream* aStre
* is used as the input for each ProcessedMediaStream created by calls to
* captureStream(UntilEnded). Seeking creates a new source stream, as does
* replaying after the input as ended. In the latter case, the new source is
* not connected to streams created by captureStreamUntilEnded.
*/
class DecodedStreamData {
public:
DecodedStreamData(OutputStreamManager* aOutputStreamManager,
+ PlaybackInfoInit&& aInit,
MozPromiseHolder<GenericPromise>&& aPromise);
~DecodedStreamData();
int64_t GetPosition() const;
void SetPlaying(bool aPlaying);
/* The following group of fields are protected by the decoder's monitor
* and can be read or written on any thread.
*/
@@ -122,54 +132,61 @@ public:
// Therefore video packets starting at or after this time need to be copied
// to the output stream.
int64_t mNextVideoTime; // microseconds
int64_t mNextAudioTime; // microseconds
// The last video image sent to the stream. Useful if we need to replicate
// the image.
RefPtr<layers::Image> mLastVideoImage;
gfx::IntSize mLastVideoImageDisplaySize;
- // This is set to true when the stream is initialized (audio and
- // video tracks added).
- bool mStreamInitialized;
bool mHaveSentFinish;
bool mHaveSentFinishAudio;
bool mHaveSentFinishVideo;
// The decoder is responsible for calling Destroy() on this stream.
const RefPtr<SourceMediaStream> mStream;
const RefPtr<DecodedStreamGraphListener> mListener;
bool mPlaying;
// True if we need to send a compensation video frame to ensure the
// StreamTime going forward.
bool mEOSVideoCompensation;
const RefPtr<OutputStreamManager> mOutputStreamManager;
};
DecodedStreamData::DecodedStreamData(OutputStreamManager* aOutputStreamManager,
+ PlaybackInfoInit&& aInit,
MozPromiseHolder<GenericPromise>&& aPromise)
: mAudioFramesWritten(0)
- , mNextVideoTime(-1)
- , mNextAudioTime(-1)
- , mStreamInitialized(false)
+ , mNextVideoTime(aInit.mStartTime)
+ , mNextAudioTime(aInit.mStartTime)
, mHaveSentFinish(false)
, mHaveSentFinishAudio(false)
, mHaveSentFinishVideo(false)
, mStream(aOutputStreamManager->Graph()->CreateSourceStream(nullptr))
// DecodedStreamGraphListener will resolve this promise.
, mListener(new DecodedStreamGraphListener(mStream, Move(aPromise)))
// mPlaying is initially true because MDSM won't start playback until playing
// becomes true. This is consistent with the settings of AudioSink.
, mPlaying(true)
, mEOSVideoCompensation(false)
, mOutputStreamManager(aOutputStreamManager)
{
mStream->AddListener(mListener);
mOutputStreamManager->Connect(mStream);
+
+ // Initialize the source stream's audio/video tracks from the passed-in track info.
+ if (aInit.mInfo.HasAudio()) {
+ mStream->AddAudioTrack(aInit.mInfo.mAudio.mTrackId,
+ aInit.mInfo.mAudio.mRate,
+ 0, new AudioSegment());
+ }
+ if (aInit.mInfo.HasVideo()) {
+ mStream->AddTrack(aInit.mInfo.mVideo.mTrackId, 0, new VideoSegment());
+ }
}
DecodedStreamData::~DecodedStreamData()
{
mOutputStreamManager->Disconnect();
mListener->Forget();
mStream->Destroy();
}
@@ -247,37 +264,41 @@ DecodedStream::Start(int64_t aStartTime,
mStartTime.emplace(aStartTime);
mInfo = aInfo;
mPlaying = true;
ConnectListener();
class R : public nsRunnable {
typedef MozPromiseHolder<GenericPromise> Promise;
- typedef void(DecodedStream::*Method)(Promise&&);
+ typedef decltype(&DecodedStream::CreateData) Method;
public:
- R(DecodedStream* aThis, Method aMethod, Promise&& aPromise)
- : mThis(aThis), mMethod(aMethod)
+ R(DecodedStream* aThis, Method aMethod, PlaybackInfoInit&& aInit, Promise&& aPromise)
+ : mThis(aThis), mMethod(aMethod), mInit(Move(aInit))
{
mPromise = Move(aPromise);
}
NS_IMETHOD Run() override
{
- (mThis->*mMethod)(Move(mPromise));
+ (mThis->*mMethod)(Move(mInit), Move(mPromise));
return NS_OK;
}
private:
RefPtr<DecodedStream> mThis;
Method mMethod;
+ PlaybackInfoInit mInit;
Promise mPromise;
};
MozPromiseHolder<GenericPromise> promise;
mFinishPromise = promise.Ensure(__func__);
- nsCOMPtr<nsIRunnable> r = new R(this, &DecodedStream::CreateData, Move(promise));
+ PlaybackInfoInit init {
+ aStartTime, aInfo
+ };
+ nsCOMPtr<nsIRunnable> r = new R(this, &DecodedStream::CreateData, Move(init), Move(promise));
AbstractThread::MainThread()->Dispatch(r.forget());
}
void
DecodedStream::Stop()
{
AssertOwnerThread();
MOZ_ASSERT(mStartTime.isSome(), "playback not started.");
@@ -317,29 +338,29 @@ DecodedStream::DestroyData(UniquePtr<Dec
DecodedStreamData* data = aData.release();
nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([=] () {
delete data;
});
AbstractThread::MainThread()->Dispatch(r.forget());
}
void
-DecodedStream::CreateData(MozPromiseHolder<GenericPromise>&& aPromise)
+DecodedStream::CreateData(PlaybackInfoInit&& aInit, MozPromiseHolder<GenericPromise>&& aPromise)
{
MOZ_ASSERT(NS_IsMainThread());
// No need to create a source stream when there are no output streams. This
// happens when RemoveOutput() is called immediately after StartPlayback().
if (!mOutputStreamManager->Graph()) {
// Resolve the promise to indicate the end of playback.
aPromise.Resolve(true, __func__);
return;
}
- auto data = new DecodedStreamData(mOutputStreamManager, Move(aPromise));
+ auto data = new DecodedStreamData(mOutputStreamManager, Move(aInit), Move(aPromise));
class R : public nsRunnable {
typedef void(DecodedStream::*Method)(UniquePtr<DecodedStreamData>);
public:
R(DecodedStream* aThis, Method aMethod, DecodedStreamData* aData)
: mThis(aThis), mMethod(aMethod), mData(aData) {}
NS_IMETHOD Run() override
{
@@ -424,47 +445,16 @@ DecodedStream::SetPlaybackRate(double aP
void
DecodedStream::SetPreservesPitch(bool aPreservesPitch)
{
AssertOwnerThread();
mParams.mPreservesPitch = aPreservesPitch;
}
-void
-DecodedStream::InitTracks()
-{
- AssertOwnerThread();
-
- if (mData->mStreamInitialized) {
- return;
- }
-
- SourceMediaStream* sourceStream = mData->mStream;
-
- if (mInfo.HasAudio()) {
- TrackID audioTrackId = mInfo.mAudio.mTrackId;
- AudioSegment* audio = new AudioSegment();
- sourceStream->AddAudioTrack(audioTrackId, mInfo.mAudio.mRate, 0, audio,
- SourceMediaStream::ADDTRACK_QUEUED);
- mData->mNextAudioTime = mStartTime.ref();
- }
-
- if (mInfo.HasVideo()) {
- TrackID videoTrackId = mInfo.mVideo.mTrackId;
- VideoSegment* video = new VideoSegment();
- sourceStream->AddTrack(videoTrackId, 0, video,
- SourceMediaStream::ADDTRACK_QUEUED);
- mData->mNextVideoTime = mStartTime.ref();
- }
-
- sourceStream->FinishAddTracks();
- mData->mStreamInitialized = true;
-}
-
static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
MediaData* aData, AudioSegment* aOutput,
uint32_t aRate, double aVolume)
{
// The amount of audio frames that is used to fuzz rounding errors.
static const int64_t AUDIO_FUZZ_FRAMES = 1;
@@ -687,17 +677,16 @@ DecodedStream::SendData()
return;
}
// Nothing to do when the stream is finished.
if (mData->mHaveSentFinish) {
return;
}
- InitTracks();
SendAudio(mParams.mVolume, mSameOrigin);
SendVideo(mSameOrigin);
AdvanceTracks();
bool finished = (!mInfo.HasAudio() || mAudioQueue.IsFinished()) &&
(!mInfo.HasVideo() || mVideoQueue.IsFinished());
if (finished && !mData->mHaveSentFinish) {