--- a/dom/media/Benchmark.cpp
+++ b/dom/media/Benchmark.cpp
@@ -145,16 +145,17 @@ BenchmarkPlayback::BenchmarkPlayback(Ben
: QueueObject(new TaskQueue(GetMediaThreadPool(MediaThreadType::PLAYBACK)))
, mMainThreadState(aMainThreadState)
, mDecoderTaskQueue(new TaskQueue(GetMediaThreadPool(
MediaThreadType::PLATFORM_DECODER)))
, mDemuxer(aDemuxer)
, mSampleIndex(0)
, mFrameCount(0)
, mFinished(false)
+ , mDrained(false)
{
MOZ_ASSERT(static_cast<Benchmark*>(mMainThreadState)->OnThread());
}
void
BenchmarkPlayback::DemuxSamples()
{
MOZ_ASSERT(OnThread());
@@ -181,18 +182,18 @@ BenchmarkPlayback::DemuxNextSample()
MOZ_ASSERT(OnThread());
RefPtr<Benchmark> ref(mMainThreadState);
RefPtr<MediaTrackDemuxer::SamplesPromise> promise = mTrackDemuxer->GetSamples();
promise->Then(
Thread(), __func__,
[this, ref](RefPtr<MediaTrackDemuxer::SamplesHolder> aHolder) {
mSamples.AppendElements(Move(aHolder->mSamples));
- if (ref->mParameters.mStopAtFrame &&
- mSamples.Length() == (size_t)ref->mParameters.mStopAtFrame.ref()) {
+ if (ref->mParameters.mStopAtFrame
+ && mSamples.Length() == (size_t)ref->mParameters.mStopAtFrame.ref()) {
InitDecoder(Move(*mTrackDemuxer->GetInfo()));
} else {
Dispatch(NS_NewRunnableFunction([this, ref]() { DemuxNextSample(); }));
}
},
[this, ref](const MediaResult& aError) {
switch (aError.Code()) {
case NS_ERROR_DOM_MEDIA_END_OF_STREAM:
@@ -205,131 +206,121 @@ BenchmarkPlayback::DemuxNextSample()
}
void
BenchmarkPlayback::InitDecoder(TrackInfo&& aInfo)
{
MOZ_ASSERT(OnThread());
RefPtr<PDMFactory> platform = new PDMFactory();
- mDecoder = platform->CreateDecoder({ aInfo, mDecoderTaskQueue, reinterpret_cast<MediaDataDecoderCallback*>(this) });
+ mDecoder = platform->CreateDecoder({ aInfo, mDecoderTaskQueue });
if (!mDecoder) {
MainThreadShutdown();
return;
}
RefPtr<Benchmark> ref(mMainThreadState);
mDecoder->Init()->Then(
Thread(), __func__,
[this, ref](TrackInfo::TrackType aTrackType) {
InputExhausted();
},
- [this, ref](MediaResult aError) {
+ [this, ref](const MediaResult& aError) {
MainThreadShutdown();
});
}
void
BenchmarkPlayback::MainThreadShutdown()
{
MOZ_ASSERT(OnThread());
if (mFinished) {
// Nothing more to do.
return;
}
mFinished = true;
if (mDecoder) {
- mDecoder->Flush();
- mDecoder->Shutdown();
- mDecoder = nullptr;
- }
-
- mDecoderTaskQueue->BeginShutdown();
- mDecoderTaskQueue->AwaitShutdownAndIdle();
- mDecoderTaskQueue = nullptr;
+ RefPtr<Benchmark> ref(mMainThreadState);
+ mDecoder->Flush()->Then(
+ Thread(), __func__,
+ [ref, this]() {
+ mDecoder->Shutdown()->Then(
+ Thread(), __func__,
+ [ref, this]() {
+ mDecoderTaskQueue->BeginShutdown();
+ mDecoderTaskQueue->AwaitShutdownAndIdle();
+ mDecoderTaskQueue = nullptr;
- if (mTrackDemuxer) {
- mTrackDemuxer->Reset();
- mTrackDemuxer->BreakCycles();
- mTrackDemuxer = nullptr;
- }
+ if (mTrackDemuxer) {
+ mTrackDemuxer->Reset();
+ mTrackDemuxer->BreakCycles();
+ mTrackDemuxer = nullptr;
+ }
- RefPtr<Benchmark> ref(mMainThreadState);
- Thread()->AsTaskQueue()->BeginShutdown()->Then(
- ref->Thread(), __func__,
- [ref]() { ref->Dispose(); },
- []() { MOZ_CRASH("not reached"); });
+ Thread()->AsTaskQueue()->BeginShutdown()->Then(
+ ref->Thread(), __func__,
+ [ref]() { ref->Dispose(); },
+ []() { MOZ_CRASH("not reached"); });
+ },
+ []() { MOZ_CRASH("not reached"); });
+ mDecoder = nullptr;
+ },
+ []() { MOZ_CRASH("not reached"); });
+ }
}
void
-BenchmarkPlayback::Output(MediaData* aData)
+BenchmarkPlayback::Output(const MediaDataDecoder::DecodedData& aResults)
{
+ MOZ_ASSERT(OnThread());
RefPtr<Benchmark> ref(mMainThreadState);
- Dispatch(NS_NewRunnableFunction([this, ref]() {
- mFrameCount++;
- if (mFrameCount == ref->mParameters.mStartupFrame) {
- mDecodeStartTime = TimeStamp::Now();
- }
- int32_t frames = mFrameCount - ref->mParameters.mStartupFrame;
- TimeDuration elapsedTime = TimeStamp::Now() - mDecodeStartTime;
- if (!mFinished &&
- (frames == ref->mParameters.mFramesToMeasure ||
- elapsedTime >= ref->mParameters.mTimeout)) {
- uint32_t decodeFps = frames / elapsedTime.ToSeconds();
- MainThreadShutdown();
- ref->Dispatch(NS_NewRunnableFunction([ref, decodeFps]() {
- ref->ReturnResult(decodeFps);
- }));
- }
- }));
-}
-
-void
-BenchmarkPlayback::Error(const MediaResult& aError)
-{
- RefPtr<Benchmark> ref(mMainThreadState);
- Dispatch(NS_NewRunnableFunction([this, ref]() { MainThreadShutdown(); }));
+ mFrameCount += aResults.Length();
+ if (!mDecodeStartTime && mFrameCount >= ref->mParameters.mStartupFrame) {
+ mDecodeStartTime = Some(TimeStamp::Now());
+ }
+ TimeStamp now = TimeStamp::Now();
+ int32_t frames = mFrameCount - ref->mParameters.mStartupFrame;
+ TimeDuration elapsedTime = now - mDecodeStartTime.refOr(now);
+ if (!mFinished
+ && (((frames == ref->mParameters.mFramesToMeasure) && frames > 0)
+ || elapsedTime >= ref->mParameters.mTimeout
+ || mDrained)) {
+ uint32_t decodeFps = frames / elapsedTime.ToSeconds();
+ MainThreadShutdown();
+ ref->Dispatch(NS_NewRunnableFunction([ref, decodeFps]() {
+ ref->ReturnResult(decodeFps);
+ }));
+ }
}
void
BenchmarkPlayback::InputExhausted()
{
+ MOZ_ASSERT(OnThread());
+ if (mFinished || mSampleIndex >= mSamples.Length()) {
+ return;
+ }
RefPtr<Benchmark> ref(mMainThreadState);
- Dispatch(NS_NewRunnableFunction([this, ref]() {
- MOZ_ASSERT(OnThread());
- if (mFinished || mSampleIndex >= mSamples.Length()) {
- return;
+ mDecoder->Decode(mSamples[mSampleIndex])
+ ->Then(Thread(), __func__,
+ [ref, this](const MediaDataDecoder::DecodedData& aResults) {
+ Output(aResults);
+ InputExhausted();
+ },
+ [ref, this](const MediaResult& aError) { MainThreadShutdown(); });
+ mSampleIndex++;
+ if (mSampleIndex == mSamples.Length()) {
+ if (ref->mParameters.mStopAtFrame) {
+ mSampleIndex = 0;
+ } else {
+ mDecoder->Drain()->Then(
+ Thread(), __func__,
+ [ref, this](const MediaDataDecoder::DecodedData& aResults) {
+ mDrained = true;
+ Output(aResults);
+ },
+ [ref, this](const MediaResult& aError) { MainThreadShutdown(); });
}
- mDecoder->Input(mSamples[mSampleIndex]);
- mSampleIndex++;
- if (mSampleIndex == mSamples.Length()) {
- if (ref->mParameters.mStopAtFrame) {
- mSampleIndex = 0;
- } else {
- mDecoder->Drain();
- }
- }
- }));
+ }
}
-void
-BenchmarkPlayback::DrainComplete()
-{
- RefPtr<Benchmark> ref(mMainThreadState);
- Dispatch(NS_NewRunnableFunction([this, ref]() {
- int32_t frames = mFrameCount - ref->mParameters.mStartupFrame;
- TimeDuration elapsedTime = TimeStamp::Now() - mDecodeStartTime;
- uint32_t decodeFps = frames / elapsedTime.ToSeconds();
- MainThreadShutdown();
- ref->Dispatch(NS_NewRunnableFunction([ref, decodeFps]() {
- ref->ReturnResult(decodeFps);
- }));
- }));
-}
-
-bool
-BenchmarkPlayback::OnReaderTaskQueue()
-{
- return OnThread();
-}
-
-}
+} // namespace mozilla
--- a/dom/media/Benchmark.h
+++ b/dom/media/Benchmark.h
@@ -5,56 +5,53 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZILLA_BENCHMARK_H
#define MOZILLA_BENCHMARK_H
#include "MediaDataDemuxer.h"
#include "QueueObject.h"
#include "PlatformDecoderModule.h"
+#include "mozilla/Maybe.h"
#include "mozilla/RefPtr.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/TimeStamp.h"
#include "nsCOMPtr.h"
namespace mozilla {
class TaskQueue;
class Benchmark;
-class BenchmarkPlayback : public QueueObject, private MediaDataDecoderCallback
+class BenchmarkPlayback : public QueueObject
{
friend class Benchmark;
explicit BenchmarkPlayback(Benchmark* aMainThreadState, MediaDataDemuxer* aDemuxer);
void DemuxSamples();
void DemuxNextSample();
void MainThreadShutdown();
void InitDecoder(TrackInfo&& aInfo);
- // MediaDataDecoderCallback
- // Those methods are called on the MediaDataDecoder's task queue.
- void Output(MediaData* aData) override;
- void Error(const MediaResult& aError) override;
- void InputExhausted() override;
- void DrainComplete() override;
- bool OnReaderTaskQueue() override;
+ void Output(const MediaDataDecoder::DecodedData& aResults);
+ void InputExhausted();
Atomic<Benchmark*> mMainThreadState;
RefPtr<TaskQueue> mDecoderTaskQueue;
RefPtr<MediaDataDecoder> mDecoder;
// Object only accessed on Thread()
RefPtr<MediaDataDemuxer> mDemuxer;
RefPtr<MediaTrackDemuxer> mTrackDemuxer;
nsTArray<RefPtr<MediaRawData>> mSamples;
size_t mSampleIndex;
- TimeStamp mDecodeStartTime;
+ Maybe<TimeStamp> mDecodeStartTime;
uint32_t mFrameCount;
bool mFinished;
+ bool mDrained;
};
// Init() must have been called at least once prior on the
// main thread.
class Benchmark : public QueueObject
{
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Benchmark)
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -17,16 +17,17 @@
#include "mozilla/layers/ShadowLayers.h"
#include "mozilla/AbstractThread.h"
#include "mozilla/CDMProxy.h"
#include "mozilla/ClearOnShutdown.h"
#include "mozilla/Preferences.h"
#include "mozilla/Telemetry.h"
#include "mozilla/SharedThreadPool.h"
#include "mozilla/SyncRunnable.h"
+#include "mozilla/Unused.h"
#include "nsContentUtils.h"
#include "nsPrintfCString.h"
#include "nsSize.h"
#include <algorithm>
#include <queue>
using namespace mozilla::media;
@@ -193,16 +194,37 @@ class MediaFormatReader::DecoderFactory
{
using InitPromise = MediaDataDecoder::InitPromise;
using TokenPromise = DecoderAllocPolicy::Promise;
using Token = DecoderAllocPolicy::Token;
public:
explicit DecoderFactory(MediaFormatReader* aOwner) : mOwner(aOwner) {}
void CreateDecoder(TrackType aTrack);
+ // Shutdown any decoder pending initialization.
+ RefPtr<ShutdownPromise> ShutdownDecoder(TrackType aTrack)
+ {
+ MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack ||
+ aTrack == TrackInfo::kVideoTrack);
+ auto& data = aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo;
+ data.mTokenRequest.DisconnectIfExists();
+ data.mInitRequest.DisconnectIfExists();
+ if (!data.mDecoder) {
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+ }
+ if (data.mShutdownRequest.Exists()) {
+ // A shutdown is already in progress due to a prior initialization error,
+ // return the existing promise.
+ data.mShutdownRequest.Disconnect();
+ RefPtr<ShutdownPromise> p = data.mShutdownPromise.forget();
+ return p;
+ }
+ RefPtr<MediaDataDecoder> decoder = data.mDecoder.forget();
+ return decoder->Shutdown();
+ }
private:
class Wrapper;
enum class Stage : int8_t
{
None,
WaitForToken,
@@ -210,27 +232,20 @@ private:
WaitForInit
};
struct Data
{
Stage mStage = Stage::None;
RefPtr<Token> mToken;
RefPtr<MediaDataDecoder> mDecoder;
- MozPromiseRequestHolder<TokenPromise> mTokenPromise;
- MozPromiseRequestHolder<InitPromise> mInitPromise;
- ~Data()
- {
- mTokenPromise.DisconnectIfExists();
- mInitPromise.DisconnectIfExists();
- if (mDecoder) {
- mDecoder->Flush();
- mDecoder->Shutdown();
- }
- }
+ MozPromiseRequestHolder<TokenPromise> mTokenRequest;
+ MozPromiseRequestHolder<InitPromise> mInitRequest;
+ MozPromiseRequestHolder<ShutdownPromise> mShutdownRequest;
+ RefPtr<ShutdownPromise> mShutdownPromise;
} mAudio, mVideo;
void RunStage(TrackType aTrack);
MediaResult DoCreateDecoder(TrackType aTrack);
void DoInitDecoder(TrackType aTrack);
MediaFormatReader* const mOwner; // guaranteed to be valid by the owner.
};
@@ -248,40 +263,46 @@ class MediaFormatReader::DecoderFactory:
using Token = DecoderAllocPolicy::Token;
public:
Wrapper(already_AddRefed<MediaDataDecoder> aDecoder,
already_AddRefed<Token> aToken)
: mDecoder(aDecoder), mToken(aToken) {}
RefPtr<InitPromise> Init() override { return mDecoder->Init(); }
- void Input(MediaRawData* aSample) override { mDecoder->Input(aSample); }
- void Flush() override { mDecoder->Flush(); }
- void Drain() override { mDecoder->Drain(); }
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override
+ {
+ return mDecoder->Decode(aSample);
+ }
+ RefPtr<DecodePromise> Drain() override { return mDecoder->Drain(); }
+ RefPtr<FlushPromise> Flush() override { return mDecoder->Flush(); }
bool IsHardwareAccelerated(nsACString& aFailureReason) const override
{
return mDecoder->IsHardwareAccelerated(aFailureReason);
}
const char* GetDescriptionName() const override
{
return mDecoder->GetDescriptionName();
}
void SetSeekThreshold(const media::TimeUnit& aTime) override
{
mDecoder->SetSeekThreshold(aTime);
}
bool SupportDecoderRecycling() const override
{
return mDecoder->SupportDecoderRecycling();
}
- void Shutdown() override
+ RefPtr<ShutdownPromise> Shutdown() override
{
- mDecoder->Shutdown();
- mDecoder = nullptr;
- mToken = nullptr;
+ RefPtr<MediaDataDecoder> decoder = mDecoder.forget();
+ RefPtr<Token> token = mToken.forget();
+ return decoder->Shutdown()->Then(
+ AbstractThread::GetCurrent(), __func__,
+ [token]() {},
+ [token]() { MOZ_RELEASE_ASSERT(false, "Can't reach here"); });
}
private:
RefPtr<MediaDataDecoder> mDecoder;
RefPtr<Token> mToken;
};
void
@@ -290,39 +311,39 @@ MediaFormatReader::DecoderFactory::RunSt
auto& data = aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo;
switch (data.mStage) {
case Stage::None: {
MOZ_ASSERT(!data.mToken);
DecoderAllocPolicy::Instance(aTrack).Alloc()->Then(
mOwner->OwnerThread(), __func__,
[this, &data, aTrack] (Token* aToken) {
- data.mTokenPromise.Complete();
+ data.mTokenRequest.Complete();
data.mToken = aToken;
data.mStage = Stage::CreateDecoder;
RunStage(aTrack);
},
[&data] () {
- data.mTokenPromise.Complete();
+ data.mTokenRequest.Complete();
data.mStage = Stage::None;
- })->Track(data.mTokenPromise);
+ })->Track(data.mTokenRequest);
data.mStage = Stage::WaitForToken;
break;
}
case Stage::WaitForToken: {
MOZ_ASSERT(!data.mToken);
- MOZ_ASSERT(data.mTokenPromise.Exists());
+ MOZ_ASSERT(data.mTokenRequest.Exists());
break;
}
case Stage::CreateDecoder: {
MOZ_ASSERT(data.mToken);
MOZ_ASSERT(!data.mDecoder);
- MOZ_ASSERT(!data.mInitPromise.Exists());
+ MOZ_ASSERT(!data.mInitRequest.Exists());
MediaResult rv = DoCreateDecoder(aTrack);
if (NS_FAILED(rv)) {
NS_WARNING("Error constructing decoders");
data.mToken = nullptr;
data.mStage = Stage::None;
mOwner->NotifyError(aTrack, rv);
return;
@@ -331,17 +352,17 @@ MediaFormatReader::DecoderFactory::RunSt
data.mDecoder = new Wrapper(data.mDecoder.forget(), data.mToken.forget());
DoInitDecoder(aTrack);
data.mStage = Stage::WaitForInit;
break;
}
case Stage::WaitForInit: {
MOZ_ASSERT(data.mDecoder);
- MOZ_ASSERT(data.mInitPromise.Exists());
+ MOZ_ASSERT(data.mInitRequest.Exists());
break;
}
}
}
MediaResult
MediaFormatReader::DecoderFactory::DoCreateDecoder(TrackType aTrack)
{
@@ -361,79 +382,88 @@ MediaFormatReader::DecoderFactory::DoCre
switch (aTrack) {
case TrackInfo::kAudioTrack: {
data.mDecoder = mOwner->mPlatform->CreateDecoder({
ownerData.mInfo
? *ownerData.mInfo->GetAsAudioInfo()
: *ownerData.mOriginalInfo->GetAsAudioInfo(),
ownerData.mTaskQueue,
- ownerData.mCallback.get(),
mOwner->mCrashHelper,
ownerData.mIsBlankDecode,
&result
});
break;
}
case TrackType::kVideoTrack: {
// Decoders use the layers backend to decide if they can use hardware decoding,
// so specify LAYERS_NONE if we want to forcibly disable it.
data.mDecoder = mOwner->mPlatform->CreateDecoder({
ownerData.mInfo
? *ownerData.mInfo->GetAsVideoInfo()
: *ownerData.mOriginalInfo->GetAsVideoInfo(),
ownerData.mTaskQueue,
- ownerData.mCallback.get(),
mOwner->mKnowsCompositor,
mOwner->GetImageContainer(),
mOwner->mCrashHelper,
ownerData.mIsBlankDecode,
&result
});
break;
}
default:
break;
}
if (data.mDecoder) {
- result = MediaResult(NS_OK);
- return result;
+ return NS_OK;
}
ownerData.mDescription = decoderCreatingError;
return result;
}
void
MediaFormatReader::DecoderFactory::DoInitDecoder(TrackType aTrack)
{
auto& ownerData = mOwner->GetDecoderData(aTrack);
auto& data = aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo;
- data.mDecoder->Init()->Then(
- mOwner->OwnerThread(), __func__,
- [this, &data, &ownerData] (TrackType aTrack) {
- data.mInitPromise.Complete();
- data.mStage = Stage::None;
- MutexAutoLock lock(ownerData.mMutex);
- ownerData.mDecoder = data.mDecoder.forget();
- ownerData.mDescription = ownerData.mDecoder->GetDescriptionName();
- mOwner->SetVideoDecodeThreshold();
- mOwner->ScheduleUpdate(aTrack);
- },
- [this, &data, aTrack] (MediaResult aError) {
- data.mInitPromise.Complete();
- data.mStage = Stage::None;
- data.mDecoder->Shutdown();
- data.mDecoder = nullptr;
- mOwner->NotifyError(aTrack, aError);
- })->Track(data.mInitPromise);
+ data.mDecoder->Init()
+ ->Then(mOwner->OwnerThread(), __func__,
+ [this, &data, &ownerData](TrackType aTrack) {
+ data.mInitRequest.Complete();
+ data.mStage = Stage::None;
+ MutexAutoLock lock(ownerData.mMutex);
+ ownerData.mDecoder = data.mDecoder.forget();
+ ownerData.mDescription = ownerData.mDecoder->GetDescriptionName();
+ mOwner->SetVideoDecodeThreshold();
+ mOwner->ScheduleUpdate(aTrack);
+ },
+ [this, &data, &ownerData, aTrack](const MediaResult& aError) {
+ data.mInitRequest.Complete();
+ MOZ_RELEASE_ASSERT(!ownerData.mDecoder,
+ "Can't have a decoder already set");
+ data.mStage = Stage::None;
+ data.mShutdownPromise = data.mDecoder->Shutdown();
+ data.mShutdownPromise
+ ->Then(
+ mOwner->OwnerThread(), __func__,
+ [this, &data, aTrack, aError]() {
+ data.mShutdownRequest.Complete();
+ data.mShutdownPromise = nullptr;
+ data.mDecoder = nullptr;
+ mOwner->NotifyError(aTrack, aError);
+ },
+ []() { MOZ_RELEASE_ASSERT(false, "Can't ever get here"); })
+ ->Track(data.mShutdownRequest);
+ })
+ ->Track(data.mInitRequest);
}
// DemuxerProxy ensures that the original main demuxer is only ever accessed
// via its own dedicated task queue.
// This ensure that the reader's taskqueue will never blocked while a demuxer
// is itself blocked attempting to access the MediaCache or the MediaResource.
class MediaFormatReader::DemuxerProxy
{
@@ -817,69 +847,141 @@ MediaFormatReader::~MediaFormatReader()
{
MOZ_COUNT_DTOR(MediaFormatReader);
}
RefPtr<ShutdownPromise>
MediaFormatReader::Shutdown()
{
MOZ_ASSERT(OnTaskQueue());
+ LOG("");
- mDecoderFactory = nullptr;
mDemuxerInitRequest.DisconnectIfExists();
mNotifyDataArrivedPromise.DisconnectIfExists();
mMetadataPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
mSeekPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
mSkipRequest.DisconnectIfExists();
- if (mAudio.mDecoder) {
- Reset(TrackInfo::kAudioTrack);
- if (mAudio.HasPromise()) {
- mAudio.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
- }
- mAudio.ShutdownDecoder();
+ if (mAudio.HasPromise()) {
+ mAudio.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
}
- if (mAudio.mTrackDemuxer) {
+ if (mVideo.HasPromise()) {
+ mVideo.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+ }
+
+ nsTArray<RefPtr<ShutdownPromise>> promises;
+
+ if (HasAudio()) {
mAudio.ResetDemuxer();
mAudio.mTrackDemuxer->BreakCycles();
mAudio.mTrackDemuxer = nullptr;
+ mAudio.ResetState();
+ promises.AppendElement(ShutdownDecoderWithPromise(TrackInfo::kAudioTrack));
}
+
+ if (HasVideo()) {
+ mVideo.ResetDemuxer();
+ mVideo.mTrackDemuxer->BreakCycles();
+ mVideo.mTrackDemuxer = nullptr;
+ mVideo.ResetState();
+ promises.AppendElement(ShutdownDecoderWithPromise(TrackInfo::kVideoTrack));
+ }
+
+ mDemuxer = nullptr;
+ mCompositorUpdatedListener.DisconnectIfExists();
+
+ if (promises.IsEmpty()) {
+ TearDownDecoders();
+ return MediaDecoderReader::Shutdown();
+ }
+
+ RefPtr<ShutdownPromise> p = mShutdownPromise.Ensure(__func__);
+ ShutdownPromise::All(OwnerThread(), promises)
+ ->Then(OwnerThread(), __func__, this,
+ &MediaFormatReader::TearDownDecoders,
+ &MediaFormatReader::TearDownDecoders);
+
+ mShutdown = true;
+
+ return p;
+}
+
+RefPtr<ShutdownPromise>
+MediaFormatReader::ShutdownDecoderWithPromise(TrackType aTrack)
+{
+ LOGV("%s", TrackTypeToStr(aTrack));
+
+ auto& decoder = GetDecoderData(aTrack);
+ if (!decoder.mFlushed && decoder.mDecoder) {
+ // The decoder has yet to be flushed.
+ // We always flush the decoder prior to a shutdown to ensure that all the
+ // potentially pending operations on the decoder are completed.
+ decoder.Flush();
+ return decoder.mShutdownPromise.Ensure(__func__);
+ }
+
+ if (decoder.mFlushRequest.Exists() || decoder.mShutdownRequest.Exists()) {
+  // Let the current flush or shutdown operation complete; Flush will continue
+ // shutting down the current decoder now that the shutdown promise is set.
+ return decoder.mShutdownPromise.Ensure(__func__);
+ }
+
+ if (!decoder.mDecoder) {
+ // Shutdown any decoders that may be in the process of being initialized
+ // in the Decoder Factory.
+ // This will be a no-op until we're processing the final decoder shutdown
+ // prior to the MediaFormatReader being shutdown.
+ return mDecoderFactory->ShutdownDecoder(aTrack);
+ }
+
+ // Finally, let's just shut down the currently active decoder.
+ decoder.ShutdownDecoder();
+ return decoder.mShutdownPromise.Ensure(__func__);
+}
+
+void
+MediaFormatReader::ShutdownDecoder(TrackType aTrack)
+{
+ LOG("%s", TrackTypeToStr(aTrack));
+ auto& decoder = GetDecoderData(aTrack);
+ if (!decoder.mDecoder) {
+ LOGV("Already shut down");
+ return;
+ }
+ if (!decoder.mShutdownPromise.IsEmpty()) {
+ LOGV("Shutdown already in progress");
+ return;
+ }
+ Unused << ShutdownDecoderWithPromise(aTrack);
+}
+
+void
+MediaFormatReader::TearDownDecoders()
+{
if (mAudio.mTaskQueue) {
mAudio.mTaskQueue->BeginShutdown();
mAudio.mTaskQueue->AwaitShutdownAndIdle();
mAudio.mTaskQueue = nullptr;
}
- MOZ_ASSERT(!mAudio.HasPromise());
-
- if (mVideo.mDecoder) {
- Reset(TrackInfo::kVideoTrack);
- if (mVideo.HasPromise()) {
- mVideo.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
- }
- mVideo.ShutdownDecoder();
- }
- if (mVideo.mTrackDemuxer) {
- mVideo.ResetDemuxer();
- mVideo.mTrackDemuxer->BreakCycles();
- mVideo.mTrackDemuxer = nullptr;
- }
if (mVideo.mTaskQueue) {
mVideo.mTaskQueue->BeginShutdown();
mVideo.mTaskQueue->AwaitShutdownAndIdle();
mVideo.mTaskQueue = nullptr;
}
- MOZ_ASSERT(!mVideo.HasPromise());
- mDemuxer = nullptr;
+ mDecoderFactory = nullptr;
mPlatform = nullptr;
mVideoFrameContainer = nullptr;
- mCompositorUpdatedListener.DisconnectIfExists();
+ if (mShutdownPromise.IsEmpty()) {
+ return;
+ }
- return MediaDecoderReader::Shutdown();
+ MediaDecoderReader::Shutdown();
+ mShutdownPromise.Resolve(true, __func__);
}
void
MediaFormatReader::InitLayersBackendType()
{
// Extract the layer manager backend type so that platform decoders
// can determine whether it's worthwhile using hardware accelerated
// video decoding.
@@ -917,17 +1019,18 @@ MediaFormatReader::InitInternal()
if (mDecoder) {
// Note: GMPCrashHelper must be created on main thread, as it may use
// weak references, which aren't threadsafe.
mCrashHelper = mDecoder->GetCrashHelper();
}
return NS_OK;
}
-class DispatchKeyNeededEvent : public Runnable {
+class DispatchKeyNeededEvent : public Runnable
+{
public:
DispatchKeyNeededEvent(AbstractMediaDecoder* aDecoder,
nsTArray<uint8_t>& aInitData,
const nsString& aInitDataType)
: mDecoder(aDecoder)
, mInitData(aInitData)
, mInitDataType(aInitDataType)
{
@@ -956,17 +1059,18 @@ MediaFormatReader::SetCDMProxy(CDMProxy*
nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([=] () {
MOZ_ASSERT(self->OnTaskQueue());
self->mCDMProxy = proxy;
});
OwnerThread()->Dispatch(r.forget());
}
bool
-MediaFormatReader::IsWaitingOnCDMResource() {
+MediaFormatReader::IsWaitingOnCDMResource()
+{
MOZ_ASSERT(OnTaskQueue());
return IsEncrypted() && !mCDMProxy;
}
RefPtr<MediaDecoderReader::MetadataPromise>
MediaFormatReader::AsyncReadMetadata()
{
MOZ_ASSERT(OnTaskQueue());
@@ -1026,17 +1130,16 @@ MediaFormatReader::OnDemuxerInitDone(nsr
mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
return;
}
mInfo.mVideo = *videoInfo->GetAsVideoInfo();
for (const MetadataTag& tag : videoInfo->mTags) {
tags->Put(tag.mKey, tag.mValue);
}
mVideo.mOriginalInfo = Move(videoInfo);
- mVideo.mCallback = new DecoderCallback(this, TrackInfo::kVideoTrack);
mTrackDemuxersMayBlock |= mVideo.mTrackDemuxer->GetSamplesMayBlock();
} else {
mVideo.mTrackDemuxer->BreakCycles();
mVideo.mTrackDemuxer = nullptr;
}
}
bool audioActive = !!mDemuxer->GetNumberTracks(TrackInfo::kAudioTrack);
@@ -1054,17 +1157,16 @@ MediaFormatReader::OnDemuxerInitDone(nsr
platform->SupportsMimeType(audioInfo->mMimeType, nullptr));
if (audioActive) {
mInfo.mAudio = *audioInfo->GetAsAudioInfo();
for (const MetadataTag& tag : audioInfo->mTags) {
tags->Put(tag.mKey, tag.mValue);
}
mAudio.mOriginalInfo = Move(audioInfo);
- mAudio.mCallback = new DecoderCallback(this, TrackInfo::kAudioTrack);
mTrackDemuxersMayBlock |= mAudio.mTrackDemuxer->GetSamplesMayBlock();
} else {
mAudio.mTrackDemuxer->BreakCycles();
mAudio.mTrackDemuxer = nullptr;
}
}
UniquePtr<EncryptionInfo> crypto = mDemuxer->GetCrypto();
@@ -1364,52 +1466,38 @@ MediaFormatReader::OnAudioDemuxCompleted
aSamples->mSamples.Length(),
aSamples->mSamples[0]->mTrackInfo ? aSamples->mSamples[0]->mTrackInfo->GetID() : 0);
mAudio.mDemuxRequest.Complete();
mAudio.mQueuedSamples.AppendElements(aSamples->mSamples);
ScheduleUpdate(TrackInfo::kAudioTrack);
}
void
-MediaFormatReader::NotifyNewOutput(TrackType aTrack, MediaData* aSample)
+MediaFormatReader::NotifyNewOutput(TrackType aTrack,
+ const MediaDataDecoder::DecodedData& aResults)
{
MOZ_ASSERT(OnTaskQueue());
- LOGV("Received new %s sample time:%lld duration:%lld",
- TrackTypeToStr(aTrack), aSample->mTime, aSample->mDuration);
auto& decoder = GetDecoderData(aTrack);
- if (!decoder.mOutputRequested) {
- LOG("MediaFormatReader produced output while flushing, discarding.");
- return;
+ for (auto& sample : aResults) {
+ LOGV("Received new %s sample time:%lld duration:%lld",
+ TrackTypeToStr(aTrack), sample->mTime, sample->mDuration);
+ decoder.mOutput.AppendElement(sample);
+ decoder.mNumSamplesOutput++;
+ decoder.mNumOfConsecutiveError = 0;
}
- decoder.mOutput.AppendElement(aSample);
- decoder.mNumSamplesOutput++;
- decoder.mNumOfConsecutiveError = 0;
- ScheduleUpdate(aTrack);
-}
-
-void
-MediaFormatReader::NotifyInputExhausted(TrackType aTrack)
-{
- MOZ_ASSERT(OnTaskQueue());
- LOGV("Decoder has requested more %s data", TrackTypeToStr(aTrack));
- auto& decoder = GetDecoderData(aTrack);
- decoder.mDecodePending = false;
+ LOG("Done processing new %s samples", TrackTypeToStr(aTrack));
ScheduleUpdate(aTrack);
}
void
MediaFormatReader::NotifyDrainComplete(TrackType aTrack)
{
MOZ_ASSERT(OnTaskQueue());
auto& decoder = GetDecoderData(aTrack);
LOG("%s", TrackTypeToStr(aTrack));
- if (!decoder.mOutputRequested) {
- LOG("MediaFormatReader called DrainComplete() before flushing, ignoring.");
- return;
- }
decoder.mDrainComplete = true;
ScheduleUpdate(aTrack);
}
void
MediaFormatReader::NotifyError(TrackType aTrack, const MediaResult& aError)
{
MOZ_ASSERT(OnTaskQueue());
@@ -1439,28 +1527,26 @@ MediaFormatReader::NotifyEndOfStream(Tra
auto& decoder = GetDecoderData(aTrack);
decoder.mDemuxEOS = true;
ScheduleUpdate(aTrack);
}
bool
MediaFormatReader::NeedInput(DecoderData& aDecoder)
{
- // To account for H.264 streams which may require a longer
- // run of input than we input, decoders fire an "input exhausted" callback.
- // The decoder will not be fed a new raw sample until InputExhausted
- // has been called.
+ // The decoder will not be fed a new raw sample until the current decoding
+  // request has completed.
return
(aDecoder.HasPromise() || aDecoder.mTimeThreshold.isSome()) &&
!aDecoder.HasPendingDrain() &&
!aDecoder.HasFatalError() &&
!aDecoder.mDemuxRequest.Exists() &&
!aDecoder.mOutput.Length() &&
!aDecoder.HasInternalSeekPending() &&
- !aDecoder.mDecodePending;
+ !aDecoder.mDecodeRequest.Exists();
}
void
MediaFormatReader::ScheduleUpdate(TrackType aTrack)
{
MOZ_ASSERT(OnTaskQueue());
if (mShutdown) {
return;
@@ -1584,51 +1670,62 @@ MediaFormatReader::RequestDemuxSamples(T
}
void
MediaFormatReader::DecodeDemuxedSamples(TrackType aTrack,
MediaRawData* aSample)
{
MOZ_ASSERT(OnTaskQueue());
auto& decoder = GetDecoderData(aTrack);
- decoder.mDecoder->Input(aSample);
- decoder.mDecodePending = true;
+ RefPtr<MediaFormatReader> self = this;
+ decoder.mFlushed = false;
+ decoder.mDecoder->Decode(aSample)
+ ->Then(mTaskQueue, __func__,
+ [self, this, aTrack, &decoder]
+ (const MediaDataDecoder::DecodedData& aResults) {
+ decoder.mDecodeRequest.Complete();
+ NotifyNewOutput(aTrack, aResults);
+ },
+ [self, this, aTrack, &decoder](const MediaResult& aError) {
+ decoder.mDecodeRequest.Complete();
+ NotifyError(aTrack, aError);
+ })
+ ->Track(decoder.mDecodeRequest);
}
void
MediaFormatReader::HandleDemuxedSamples(TrackType aTrack,
AbstractMediaDecoder::AutoNotifyDecoded& aA)
{
MOZ_ASSERT(OnTaskQueue());
auto& decoder = GetDecoderData(aTrack);
+ if (decoder.mFlushRequest.Exists() || decoder.mShutdownRequest.Exists()) {
+ LOGV("Decoder operation in progress, let it complete.");
+ return;
+ }
+
if (decoder.mQueuedSamples.IsEmpty()) {
return;
}
if (!decoder.mDecoder) {
mDecoderFactory->CreateDecoder(aTrack);
return;
}
LOGV("Giving %s input to decoder", TrackTypeToStr(aTrack));
// Decode all our demuxed frames.
- bool samplesPending = false;
while (decoder.mQueuedSamples.Length()) {
RefPtr<MediaRawData> sample = decoder.mQueuedSamples[0];
RefPtr<SharedTrackInfo> info = sample->mTrackInfo;
if (info && decoder.mLastStreamSourceID != info->GetID()) {
- if (samplesPending) {
- // Let existing samples complete their decoding. We'll resume later.
- return;
- }
-
bool supportRecycling = MediaPrefs::MediaDecoderCheckRecycling() &&
decoder.mDecoder->SupportDecoderRecycling();
if (decoder.mNextStreamSourceID.isNothing() ||
decoder.mNextStreamSourceID.ref() != info->GetID()) {
if (!supportRecycling) {
LOG("%s stream id has changed from:%d to:%d, draining decoder.",
TrackTypeToStr(aTrack), decoder.mLastStreamSourceID,
info->GetID());
@@ -1639,28 +1736,29 @@ MediaFormatReader::HandleDemuxedSamples(
}
}
LOG("%s stream id has changed from:%d to:%d.",
TrackTypeToStr(aTrack), decoder.mLastStreamSourceID,
info->GetID());
decoder.mLastStreamSourceID = info->GetID();
decoder.mNextStreamSourceID.reset();
+ decoder.mInfo = info;
+
if (!supportRecycling) {
LOG("Decoder does not support recycling, recreate decoder.");
- // Reset will clear our array of queued samples. So make a copy now.
- nsTArray<RefPtr<MediaRawData>> samples{decoder.mQueuedSamples};
- Reset(aTrack);
- decoder.ShutdownDecoder();
+ // If flushing is required, it will clear our array of queued samples.
+ // So make a copy now.
+ nsTArray<RefPtr<MediaRawData>> samples{ Move(decoder.mQueuedSamples) };
+ ShutdownDecoder(aTrack);
if (sample->mKeyframe) {
decoder.mQueuedSamples.AppendElements(Move(samples));
}
}
- decoder.mInfo = info;
if (sample->mKeyframe) {
ScheduleUpdate(aTrack);
} else {
TimeInterval time =
TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
TimeUnit::FromMicroseconds(sample->GetEndTime()));
InternalSeekTarget seekTarget =
decoder.mTimeThreshold.refOr(InternalSeekTarget(time, false));
@@ -1668,27 +1766,26 @@ MediaFormatReader::HandleDemuxedSamples(
sample->mTime);
InternalSeek(aTrack, seekTarget);
}
return;
}
LOGV("Input:%lld (dts:%lld kf:%d)",
sample->mTime, sample->mTimecode, sample->mKeyframe);
- decoder.mOutputRequested = true;
decoder.mNumSamplesInput++;
decoder.mSizeOfQueue++;
if (aTrack == TrackInfo::kVideoTrack) {
aA.mStats.mParsedFrames++;
}
DecodeDemuxedSamples(aTrack, sample);
decoder.mQueuedSamples.RemoveElementAt(0);
- samplesPending = true;
+ break;
}
}
void
MediaFormatReader::InternalSeek(TrackType aTrack, const InternalSeekTarget& aTarget)
{
MOZ_ASSERT(OnTaskQueue());
LOG("%s internal seek to %f",
@@ -1738,27 +1835,38 @@ MediaFormatReader::DrainDecoder(TrackTyp
{
MOZ_ASSERT(OnTaskQueue());
auto& decoder = GetDecoderData(aTrack);
if (!decoder.mNeedDraining || decoder.mDraining) {
return;
}
decoder.mNeedDraining = false;
- // mOutputRequest must be set, otherwise NotifyDrainComplete()
- // may reject the drain if a Flush recently occurred.
- decoder.mOutputRequested = true;
if (!decoder.mDecoder ||
decoder.mNumSamplesInput == decoder.mNumSamplesOutput) {
// No frames to drain.
+ LOGV("Draining %s with nothing to drain", TrackTypeToStr(aTrack));
NotifyDrainComplete(aTrack);
return;
}
- decoder.mDecoder->Drain();
decoder.mDraining = true;
+ RefPtr<MediaFormatReader> self = this;
+ decoder.mDecoder->Drain()
+ ->Then(mTaskQueue, __func__,
+ [self, this, aTrack, &decoder]
+ (const MediaDataDecoder::DecodedData& aResults) {
+ decoder.mDrainRequest.Complete();
+ NotifyNewOutput(aTrack, aResults);
+ NotifyDrainComplete(aTrack);
+ },
+ [self, this, aTrack, &decoder](const MediaResult& aError) {
+ decoder.mDrainRequest.Complete();
+ NotifyError(aTrack, aError);
+ })
+ ->Track(decoder.mDrainRequest);
LOG("Requesting %s decoder to drain", TrackTypeToStr(aTrack));
}
void
MediaFormatReader::Update(TrackType aTrack)
{
MOZ_ASSERT(OnTaskQueue());
@@ -1906,44 +2014,45 @@ MediaFormatReader::Update(TrackType aTra
}
if (decoder.mNeedDraining) {
DrainDecoder(aTrack);
return;
}
if (decoder.mError && !decoder.HasFatalError()) {
- decoder.mDecodePending = false;
bool needsNewDecoder = decoder.mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER;
if (!needsNewDecoder && ++decoder.mNumOfConsecutiveError > decoder.mMaxConsecutiveError) {
NotifyError(aTrack, decoder.mError.ref());
return;
}
decoder.mError.reset();
LOG("%s decoded error count %d", TrackTypeToStr(aTrack),
decoder.mNumOfConsecutiveError);
media::TimeUnit nextKeyframe;
if (aTrack == TrackType::kVideoTrack && !decoder.HasInternalSeekPending() &&
NS_SUCCEEDED(decoder.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe))) {
if (needsNewDecoder) {
- decoder.ShutdownDecoder();
+ ShutdownDecoder(aTrack);
}
SkipVideoDemuxToNextKeyFrame(decoder.mLastSampleTime.refOr(TimeInterval()).Length());
- return;
} else if (aTrack == TrackType::kAudioTrack) {
decoder.Flush();
}
+ return;
}
bool needInput = NeedInput(decoder);
- LOGV("Update(%s) ni=%d no=%d ie=%d, in:%llu out:%llu qs=%u pending:%u waiting:%d promise:%d sid:%u",
- TrackTypeToStr(aTrack), needInput, needOutput, decoder.mDecodePending,
- decoder.mNumSamplesInput, decoder.mNumSamplesOutput,
- uint32_t(size_t(decoder.mSizeOfQueue)), uint32_t(decoder.mOutput.Length()),
+ LOGV("Update(%s) ni=%d no=%d in:%llu out:%llu qs=%u decoding:%d flushing:%d "
+ "shutdown:%d pending:%u waiting:%d promise:%d sid:%u",
+ TrackTypeToStr(aTrack), needInput, needOutput, decoder.mNumSamplesInput,
+ decoder.mNumSamplesOutput, uint32_t(size_t(decoder.mSizeOfQueue)),
+ decoder.mDecodeRequest.Exists(), decoder.mFlushRequest.Exists(),
+ decoder.mShutdownRequest.Exists(), uint32_t(decoder.mOutput.Length()),
decoder.mWaitingForData, decoder.HasPromise(),
decoder.mLastStreamSourceID);
if ((decoder.mWaitingForData &&
(!decoder.mTimeThreshold || decoder.mTimeThreshold.ref().mWaiting))) {
// Nothing more we can do at present.
LOGV("Still waiting for data or key.");
return;
@@ -2069,62 +2178,16 @@ MediaFormatReader::ResetDecode(TrackSet
mAudio.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
}
}
return MediaDecoderReader::ResetDecode(aTracks);
}
void
-MediaFormatReader::Output(TrackType aTrack, MediaData* aSample)
-{
- if (!aSample) {
- NS_WARNING("MediaFormatReader::Output() passed a null sample");
- Error(aTrack, MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__));
- return;
- }
-
- LOGV("Decoded %s sample time=%lld timecode=%lld kf=%d dur=%lld",
- TrackTypeToStr(aTrack), aSample->mTime, aSample->mTimecode,
- aSample->mKeyframe, aSample->mDuration);
-
- RefPtr<nsIRunnable> task =
- NewRunnableMethod<TrackType, MediaData*>(
- this, &MediaFormatReader::NotifyNewOutput, aTrack, aSample);
- OwnerThread()->Dispatch(task.forget());
-}
-
-void
-MediaFormatReader::DrainComplete(TrackType aTrack)
-{
- RefPtr<nsIRunnable> task =
- NewRunnableMethod<TrackType>(
- this, &MediaFormatReader::NotifyDrainComplete, aTrack);
- OwnerThread()->Dispatch(task.forget());
-}
-
-void
-MediaFormatReader::InputExhausted(TrackType aTrack)
-{
- RefPtr<nsIRunnable> task =
- NewRunnableMethod<TrackType>(
- this, &MediaFormatReader::NotifyInputExhausted, aTrack);
- OwnerThread()->Dispatch(task.forget());
-}
-
-void
-MediaFormatReader::Error(TrackType aTrack, const MediaResult& aError)
-{
- RefPtr<nsIRunnable> task =
- NewRunnableMethod<TrackType, MediaResult>(
- this, &MediaFormatReader::NotifyError, aTrack, aError);
- OwnerThread()->Dispatch(task.forget());
-}
-
-void
MediaFormatReader::Reset(TrackType aTrack)
{
MOZ_ASSERT(OnTaskQueue());
LOG("Reset(%s) BEGIN", TrackTypeToStr(aTrack));
auto& decoder = GetDecoderData(aTrack);
decoder.ResetState();
@@ -2158,20 +2221,16 @@ void
MediaFormatReader::SkipVideoDemuxToNextKeyFrame(media::TimeUnit aTimeThreshold)
{
MOZ_ASSERT(OnTaskQueue());
LOG("Skipping up to %lld", aTimeThreshold.ToMicroseconds());
// We've reached SkipVideoDemuxToNextKeyFrame when our decoding is late.
// As such we can drop all already decoded samples and discard all pending
// samples.
- // TODO: Ideally we should set mOutputRequested to false so that all pending
- // frames are dropped too. However, we can't do such thing as the code assumes
- // that the decoder just got flushed. Once bug 1257107 land, we could set the
- // decoder threshold to the value of currentTime.
DropDecodedSamples(TrackInfo::kVideoTrack);
mVideo.mTrackDemuxer->SkipToNextRandomAccessPoint(aTimeThreshold)
->Then(OwnerThread(), __func__, this,
&MediaFormatReader::OnVideoSkipCompleted,
&MediaFormatReader::OnVideoSkipFailed)
->Track(mSkipRequest);
return;
@@ -2484,18 +2543,22 @@ MediaFormatReader::OnAudioSeekCompleted(
void
MediaFormatReader::OnAudioSeekFailed(const MediaResult& aError)
{
OnSeekFailed(TrackType::kAudioTrack, aError);
}
void MediaFormatReader::ReleaseResources()
{
- mVideo.ShutdownDecoder();
- mAudio.ShutdownDecoder();
+ LOGV("");
+ if (mShutdown) {
+ return;
+ }
+ ShutdownDecoder(TrackInfo::kAudioTrack);
+ ShutdownDecoder(TrackInfo::kVideoTrack);
}
bool
MediaFormatReader::VideoIsHardwareAccelerated() const
{
return mVideo.mIsHardwareAccelerated;
}
@@ -2634,19 +2697,18 @@ MediaFormatReader::GetMozDebugReaderData
MutexAutoLock mon(mVideo.mMutex);
videoName = mVideo.mDescription;
}
result += nsPrintfCString("audio decoder: %s\n", audioName);
result += nsPrintfCString("audio frames decoded: %lld\n",
mAudio.mNumSamplesOutputTotal);
if (HasAudio()) {
- result += nsPrintfCString("audio state: ni=%d no=%d ie=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
+ result += nsPrintfCString("audio state: ni=%d no=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
NeedInput(mAudio), mAudio.HasPromise(),
- mAudio.mDecodePending,
mAudio.mDemuxRequest.Exists(),
int(mAudio.mQueuedSamples.Length()),
mAudio.mTimeThreshold
? mAudio.mTimeThreshold.ref().Time().ToSeconds()
: -1.0,
mAudio.mTimeThreshold
? mAudio.mTimeThreshold.ref().mHasSeeked
: -1,
@@ -2658,19 +2720,18 @@ MediaFormatReader::GetMozDebugReaderData
}
result += nsPrintfCString("video decoder: %s\n", videoName);
result += nsPrintfCString("hardware video decoding: %s\n",
VideoIsHardwareAccelerated() ? "enabled" : "disabled");
result += nsPrintfCString("video frames decoded: %lld (skipped:%lld)\n",
mVideo.mNumSamplesOutputTotal,
mVideo.mNumSamplesSkippedTotal);
if (HasVideo()) {
- result += nsPrintfCString("video state: ni=%d no=%d ie=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
+ result += nsPrintfCString("video state: ni=%d no=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
NeedInput(mVideo), mVideo.HasPromise(),
- mVideo.mDecodePending,
mVideo.mDemuxRequest.Exists(),
int(mVideo.mQueuedSamples.Length()),
mVideo.mTimeThreshold
? mVideo.mTimeThreshold.ref().Time().ToSeconds()
: -1.0,
mVideo.mTimeThreshold
? mVideo.mTimeThreshold.ref().mHasSeeked
: -1,
@@ -2699,18 +2760,17 @@ MediaFormatReader::SetBlankDecode(TrackT
if (decoder.mIsBlankDecode == aIsBlankDecode) {
return;
}
LOG("%s, decoder.mIsBlankDecode = %d => aIsBlankDecode = %d",
TrackTypeToStr(aTrack), decoder.mIsBlankDecode, aIsBlankDecode);
decoder.mIsBlankDecode = aIsBlankDecode;
- decoder.Flush();
- decoder.ShutdownDecoder();
+ ShutdownDecoder(aTrack);
}
void
MediaFormatReader::OnFirstDemuxCompleted(TrackInfo::TrackType aType,
RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
{
MOZ_ASSERT(OnTaskQueue());
--- a/dom/media/MediaFormatReader.h
+++ b/dom/media/MediaFormatReader.h
@@ -131,92 +131,55 @@ private:
};
// Perform an internal seek to aTime. If aDropTarget is true then
// the first sample past the target will be dropped.
void InternalSeek(TrackType aTrack, const InternalSeekTarget& aTarget);
// Drain the current decoder.
void DrainDecoder(TrackType aTrack);
- void NotifyNewOutput(TrackType aTrack, MediaData* aSample);
- void NotifyInputExhausted(TrackType aTrack);
+ void NotifyNewOutput(TrackType aTrack,
+ const MediaDataDecoder::DecodedData& aResults);
void NotifyDrainComplete(TrackType aTrack);
void NotifyError(TrackType aTrack, const MediaResult& aError);
void NotifyWaitingForData(TrackType aTrack);
void NotifyEndOfStream(TrackType aTrack);
void ExtractCryptoInitData(nsTArray<uint8_t>& aInitData);
// Initializes mLayersBackendType if possible.
void InitLayersBackendType();
- // DecoderCallback proxies the MediaDataDecoderCallback calls to these
- // functions.
- void Output(TrackType aType, MediaData* aSample);
- void InputExhausted(TrackType aTrack);
- void Error(TrackType aTrack, const MediaResult& aError);
void Reset(TrackType aTrack);
- void DrainComplete(TrackType aTrack);
void DropDecodedSamples(TrackType aTrack);
bool ShouldSkip(bool aSkipToNextKeyframe, media::TimeUnit aTimeThreshold);
void SetVideoDecodeThreshold();
size_t SizeOfQueue(TrackType aTrack);
RefPtr<PDMFactory> mPlatform;
- class DecoderCallback : public MediaDataDecoderCallback {
- public:
- DecoderCallback(MediaFormatReader* aReader, TrackType aType)
- : mReader(aReader)
- , mType(aType)
- {
- }
- void Output(MediaData* aSample) override {
- mReader->Output(mType, aSample);
- }
- void InputExhausted() override {
- mReader->InputExhausted(mType);
- }
- void Error(const MediaResult& aError) override {
- mReader->Error(mType, aError);
- }
- void DrainComplete() override {
- mReader->DrainComplete(mType);
- }
- void ReleaseMediaResources() override {
- mReader->ReleaseResources();
- }
- bool OnReaderTaskQueue() override {
- return mReader->OnTaskQueue();
- }
- private:
- MediaFormatReader* mReader;
- TrackType mType;
- };
-
struct DecoderData {
DecoderData(MediaFormatReader* aOwner,
MediaData::Type aType,
uint32_t aNumOfMaxError)
: mOwner(aOwner)
, mType(aType)
, mMutex("DecoderData")
, mDescription("shutdown")
, mUpdateScheduled(false)
, mDemuxEOS(false)
, mWaitingForData(false)
, mReceivedNewData(false)
- , mOutputRequested(false)
- , mDecodePending(false)
, mNeedDraining(false)
, mDraining(false)
, mDrainComplete(false)
+ , mFlushed(true)
, mNumOfConsecutiveError(0)
, mMaxConsecutiveError(aNumOfMaxError)
, mNumSamplesInput(0)
, mNumSamplesOutput(0)
, mNumSamplesOutputTotal(0)
, mNumSamplesSkippedTotal(0)
, mSizeOfQueue(0)
, mIsHardwareAccelerated(false)
@@ -226,29 +189,39 @@ private:
MediaFormatReader* mOwner;
// Disambiguate Audio vs Video.
MediaData::Type mType;
RefPtr<MediaTrackDemuxer> mTrackDemuxer;
// TaskQueue on which decoder can choose to decode.
// Only non-null up until the decoder is created.
RefPtr<TaskQueue> mTaskQueue;
- // Callback that receives output and error notifications from the decoder.
- nsAutoPtr<DecoderCallback> mCallback;
// Mutex protecting mDescription and mDecoder.
Mutex mMutex;
// The platform decoder.
RefPtr<MediaDataDecoder> mDecoder;
const char* mDescription;
void ShutdownDecoder()
{
MutexAutoLock lock(mMutex);
if (mDecoder) {
- mDecoder->Shutdown();
+ RefPtr<MediaFormatReader> owner = mOwner;
+ TrackType type = mType == MediaData::AUDIO_DATA
+ ? TrackType::kAudioTrack
+ : TrackType::kVideoTrack;
+ mDecoder->Shutdown()
+ ->Then(mOwner->OwnerThread(), __func__,
+ [owner, this, type]() {
+ mShutdownRequest.Complete();
+ mShutdownPromise.ResolveIfExists(true, __func__);
+ owner->ScheduleUpdate(type);
+ },
+ []() { MOZ_RELEASE_ASSERT(false, "Can't ever be here"); })
+ ->Track(mShutdownRequest);
}
mDescription = "shutdown";
mDecoder = nullptr;
}
// Only accessed from reader's task queue.
bool mUpdateScheduled;
bool mDemuxEOS;
@@ -271,26 +244,26 @@ private:
}
bool IsWaiting() const
{
MOZ_ASSERT(mOwner->OnTaskQueue());
return mWaitingForData;
}
// MediaDataDecoder handler's variables.
- bool mOutputRequested;
- // Set to true once the MediaDataDecoder has been fed a compressed sample.
- // No more samples will be passed to the decoder while true.
- // mDecodePending is reset when:
- // 1- The decoder calls InputExhausted
- // 2- The decoder is Flushed or Reset.
- bool mDecodePending;
+ MozPromiseRequestHolder<MediaDataDecoder::DecodePromise> mDecodeRequest;
bool mNeedDraining;
+ MozPromiseRequestHolder<MediaDataDecoder::DecodePromise> mDrainRequest;
bool mDraining;
bool mDrainComplete;
+ MozPromiseRequestHolder<MediaDataDecoder::FlushPromise> mFlushRequest;
+ // Set to true if the last operation run on the decoder was a flush.
+ bool mFlushed;
+ MozPromiseHolder<ShutdownPromise> mShutdownPromise;
+ MozPromiseRequestHolder<ShutdownPromise> mShutdownRequest;
bool HasPendingDrain() const
{
return mDraining || mDrainComplete;
}
uint32_t mNumOfConsecutiveError;
uint32_t mMaxConsecutiveError;
@@ -344,46 +317,71 @@ private:
{
mDemuxRequest.DisconnectIfExists();
mSeekRequest.DisconnectIfExists();
mTrackDemuxer->Reset();
mQueuedSamples.Clear();
}
// Flush the decoder if present and reset decoding related data.
- // Decoding will be suspended until mInputRequested is set again.
// Following a flush, the decoder is ready to accept any new data.
void Flush()
{
- if (mDecoder) {
- mDecoder->Flush();
+ if (mFlushRequest.Exists() || mFlushed) {
+ // Flush still pending or already flushed, nothing more to do.
+ return;
}
- mOutputRequested = false;
- mDecodePending = false;
+ mDecodeRequest.DisconnectIfExists();
+ mDrainRequest.DisconnectIfExists();
mOutput.Clear();
mNumSamplesInput = 0;
mNumSamplesOutput = 0;
mSizeOfQueue = 0;
mDraining = false;
mDrainComplete = false;
+ if (mDecoder && !mFlushed) {
+ RefPtr<MediaFormatReader> owner = mOwner;
+ TrackType type = mType == MediaData::AUDIO_DATA
+ ? TrackType::kAudioTrack
+ : TrackType::kVideoTrack;
+ mDecoder->Flush()
+ ->Then(mOwner->OwnerThread(), __func__,
+ [owner, type, this]() {
+ mFlushRequest.Complete();
+ if (!mShutdownPromise.IsEmpty()) {
+ ShutdownDecoder();
+ return;
+ }
+ owner->ScheduleUpdate(type);
+ },
+ [owner, type, this](const MediaResult& aError) {
+ mFlushRequest.Complete();
+ if (!mShutdownPromise.IsEmpty()) {
+ ShutdownDecoder();
+ return;
+ }
+ owner->NotifyError(type, aError);
+ })
+ ->Track(mFlushRequest);
+ }
+ mFlushed = true;
}
// Reset the state of the DecoderData, clearing all queued frames
// (pending demuxed and decoded).
- // Decoding will be suspended until mInputRequested is set again.
// The track demuxer is *not* reset.
void ResetState()
{
MOZ_ASSERT(mOwner->OnTaskQueue());
mDemuxEOS = false;
mWaitingForData = false;
mQueuedSamples.Clear();
- mOutputRequested = false;
mNeedDraining = false;
- mDecodePending = false;
+ mDecodeRequest.DisconnectIfExists();
+ mDrainRequest.DisconnectIfExists();
mDraining = false;
mDrainComplete = false;
mTimeThreshold.reset();
mLastSampleTime.reset();
mOutput.Clear();
mNumSamplesInput = 0;
mNumSamplesOutput = 0;
mSizeOfQueue = 0;
@@ -569,13 +567,18 @@ private:
void OnFirstDemuxFailed(TrackInfo::TrackType aType, const MediaResult& aError);
void MaybeResolveMetadataPromise();
UniquePtr<MetadataTags> mTags;
// A flag indicating if the start time is known or not.
bool mHasStartTime = false;
+
+ void ShutdownDecoder(TrackType aTrack);
+ RefPtr<ShutdownPromise> ShutdownDecoderWithPromise(TrackType aTrack);
+ void TearDownDecoders();
+ MozPromiseHolder<ShutdownPromise> mShutdownPromise;
};
} // namespace mozilla
#endif
--- a/dom/media/ipc/PVideoDecoder.ipdl
+++ b/dom/media/ipc/PVideoDecoder.ipdl
@@ -31,18 +31,18 @@ struct VideoDataIPDL
};
struct MediaRawDataIPDL
{
MediaDataIPDL base;
Shmem buffer;
};
-// This protocol provides a way to use MediaDataDecoder/MediaDataDecoderCallback
-// across processes. The parent side currently is only implemented to work with
+// This protocol provides a way to use MediaDataDecoder across processes.
+// The parent side currently is only implemented to work with
// Window Media Foundation, but can be extended easily to support other backends.
// The child side runs in the content process, and the parent side runs in the
// GPU process. We run a separate IPDL thread for both sides.
async protocol PVideoDecoder
{
manager PVideoDecoderManager;
parent:
async Init();
--- a/dom/media/ipc/RemoteVideoDecoder.cpp
+++ b/dom/media/ipc/RemoteVideoDecoder.cpp
@@ -16,23 +16,19 @@
namespace mozilla {
namespace dom {
using base::Thread;
using namespace ipc;
using namespace layers;
using namespace gfx;
-RemoteVideoDecoder::RemoteVideoDecoder(MediaDataDecoderCallback* aCallback)
+RemoteVideoDecoder::RemoteVideoDecoder()
: mActor(new VideoDecoderChild())
{
-#ifdef DEBUG
- mCallback = aCallback;
-#endif
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
}
RemoteVideoDecoder::~RemoteVideoDecoder()
{
// We're about to be destroyed and drop our ref to
// VideoDecoderChild. Make sure we put a ref into the
// task queue for the VideoDecoderChild thread to keep
// it alive until we send the delete message.
@@ -49,96 +45,73 @@ RemoteVideoDecoder::~RemoteVideoDecoder(
mActor = nullptr;
VideoDecoderManagerChild::GetManagerThread()->Dispatch(task.forget(), NS_DISPATCH_NORMAL);
}
RefPtr<MediaDataDecoder::InitPromise>
RemoteVideoDecoder::Init()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ RefPtr<RemoteVideoDecoder> self = this;
return InvokeAsync(VideoDecoderManagerChild::GetManagerAbstractThread(),
- this, __func__, &RemoteVideoDecoder::InitInternal);
-}
-
-RefPtr<MediaDataDecoder::InitPromise>
-RemoteVideoDecoder::InitInternal()
-{
- MOZ_ASSERT(mActor);
- MOZ_ASSERT(NS_GetCurrentThread() == VideoDecoderManagerChild::GetManagerThread());
- return mActor->Init();
+ __func__, [self, this]() { return mActor->Init(); });
}
-void
-RemoteVideoDecoder::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+RemoteVideoDecoder::Decode(MediaRawData* aSample)
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
RefPtr<RemoteVideoDecoder> self = this;
RefPtr<MediaRawData> sample = aSample;
- VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([self, sample]() {
- MOZ_ASSERT(self->mActor);
- self->mActor->Input(sample);
- }), NS_DISPATCH_NORMAL);
+ return InvokeAsync(VideoDecoderManagerChild::GetManagerAbstractThread(),
+ __func__,
+ [self, this, sample]() { return mActor->Decode(sample); });
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
RemoteVideoDecoder::Flush()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- SynchronousTask task("Decoder flush");
- VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([&]() {
- MOZ_ASSERT(this->mActor);
- this->mActor->Flush(&task);
- }), NS_DISPATCH_NORMAL);
- task.Wait();
+ RefPtr<RemoteVideoDecoder> self = this;
+ return InvokeAsync(VideoDecoderManagerChild::GetManagerAbstractThread(),
+ __func__, [self, this]() { return mActor->Flush(); });
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
RemoteVideoDecoder::Drain()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
RefPtr<RemoteVideoDecoder> self = this;
- VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([self]() {
- MOZ_ASSERT(self->mActor);
- self->mActor->Drain();
- }), NS_DISPATCH_NORMAL);
+ return InvokeAsync(VideoDecoderManagerChild::GetManagerAbstractThread(),
+ __func__, [self, this]() { return mActor->Drain(); });
}
-void
+RefPtr<ShutdownPromise>
RemoteVideoDecoder::Shutdown()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- SynchronousTask task("Shutdown");
RefPtr<RemoteVideoDecoder> self = this;
- VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([&]() {
- AutoCompleteTask complete(&task);
- MOZ_ASSERT(self->mActor);
- self->mActor->Shutdown();
- }), NS_DISPATCH_NORMAL);
- task.Wait();
+ return InvokeAsync(VideoDecoderManagerChild::GetManagerAbstractThread(),
+ __func__, [self, this]() {
+ mActor->Shutdown();
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+ });
}
bool
RemoteVideoDecoder::IsHardwareAccelerated(nsACString& aFailureReason) const
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
return mActor->IsHardwareAccelerated(aFailureReason);
}
void
RemoteVideoDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
RefPtr<RemoteVideoDecoder> self = this;
media::TimeUnit time = aTime;
VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([=]() {
MOZ_ASSERT(self->mActor);
self->mActor->SetSeekThreshold(time);
}), NS_DISPATCH_NORMAL);
-
}
nsresult
RemoteDecoderModule::Startup()
{
if (!VideoDecoderManagerChild::GetManagerThread()) {
return NS_ERROR_FAILURE;
}
@@ -169,26 +142,23 @@ already_AddRefed<MediaDataDecoder>
RemoteDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
{
if (!MediaPrefs::PDMUseGPUDecoder() ||
!aParams.mKnowsCompositor ||
aParams.mKnowsCompositor->GetTextureFactoryIdentifier().mParentProcessType != GeckoProcessType_GPU) {
return mWrapped->CreateVideoDecoder(aParams);
}
- MediaDataDecoderCallback* callback = aParams.mCallback;
- MOZ_ASSERT(callback->OnReaderTaskQueue());
- RefPtr<RemoteVideoDecoder> object = new RemoteVideoDecoder(callback);
+ RefPtr<RemoteVideoDecoder> object = new RemoteVideoDecoder();
SynchronousTask task("InitIPDL");
bool success;
VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([&]() {
AutoCompleteTask complete(&task);
- success = object->mActor->InitIPDL(callback,
- aParams.VideoConfig(),
+ success = object->mActor->InitIPDL(aParams.VideoConfig(),
aParams.mKnowsCompositor->GetTextureFactoryIdentifier());
}), NS_DISPATCH_NORMAL);
task.Wait();
if (!success) {
return nullptr;
}
--- a/dom/media/ipc/RemoteVideoDecoder.h
+++ b/dom/media/ipc/RemoteVideoDecoder.h
@@ -23,38 +23,33 @@ class RemoteDecoderModule;
// operates solely on the VideoDecoderManagerChild thread.
class RemoteVideoDecoder : public MediaDataDecoder
{
public:
friend class RemoteDecoderModule;
// MediaDataDecoder
RefPtr<InitPromise> Init() override;
- void Input(MediaRawData* aSample) override;
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
void SetSeekThreshold(const media::TimeUnit& aTime) override;
const char* GetDescriptionName() const override { return "RemoteVideoDecoder"; }
private:
- explicit RemoteVideoDecoder(MediaDataDecoderCallback* aCallback);
+ RemoteVideoDecoder();
~RemoteVideoDecoder();
- RefPtr<InitPromise> InitInternal();
-
- // Only ever written to from the reader task queue (during the constructor and destructor
- // when we can guarantee no other threads are accessing it). Only read from the manager
- // thread.
+ // Only ever written to from the reader task queue (during the constructor and
+ // destructor when we can guarantee no other threads are accessing it). Only
+ // read from the manager thread.
RefPtr<VideoDecoderChild> mActor;
-#ifdef DEBUG
- MediaDataDecoderCallback* mCallback;
-#endif
};
// A PDM implementation that creates RemoteVideoDecoders.
// We currently require a 'wrapped' PDM in order to be able to answer SupportsMimeType
// and DecoderNeedsConversion. Ideally we'd check these over IPDL using the manager
// protocol
class RemoteDecoderModule : public PlatformDecoderModule
{
--- a/dom/media/ipc/VideoDecoderChild.cpp
+++ b/dom/media/ipc/VideoDecoderChild.cpp
@@ -5,32 +5,31 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "VideoDecoderChild.h"
#include "VideoDecoderManagerChild.h"
#include "mozilla/layers/TextureClient.h"
#include "base/thread.h"
#include "MediaInfo.h"
#include "ImageContainer.h"
#include "GPUVideoImage.h"
-#include "mozilla/layers/SynchronousTask.h"
namespace mozilla {
namespace dom {
using base::Thread;
using namespace ipc;
using namespace layers;
using namespace gfx;
VideoDecoderChild::VideoDecoderChild()
: mThread(VideoDecoderManagerChild::GetManagerThread())
- , mFlushTask(nullptr)
, mCanSend(false)
, mInitialized(false)
, mIsHardwareAccelerated(false)
+ , mNeedNewDecoder(false)
{
}
VideoDecoderChild::~VideoDecoderChild()
{
AssertOnManagerThread();
mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
}
@@ -49,54 +48,52 @@ VideoDecoderChild::RecvOutput(const Vide
RefPtr<VideoData> video = VideoData::CreateFromImage(info,
aData.base().offset(),
aData.base().time(),
aData.base().duration(),
image,
aData.base().keyframe(),
aData.base().timecode(),
IntRect());
- if (mCallback) {
- mCallback->Output(video);
- }
+ mDecodedData.AppendElement(Move(video));
return IPC_OK();
}
mozilla::ipc::IPCResult
VideoDecoderChild::RecvInputExhausted()
{
AssertOnManagerThread();
- if (mCallback) {
- mCallback->InputExhausted();
- }
+ mDecodePromise.ResolveIfExists(mDecodedData, __func__);
+ mDecodedData.Clear();
return IPC_OK();
}
mozilla::ipc::IPCResult
VideoDecoderChild::RecvDrainComplete()
{
AssertOnManagerThread();
- if (mCallback) {
- mCallback->DrainComplete();
- }
+ mDrainPromise.ResolveIfExists(mDecodedData, __func__);
+ mDecodedData.Clear();
return IPC_OK();
}
mozilla::ipc::IPCResult
VideoDecoderChild::RecvError(const nsresult& aError)
{
AssertOnManagerThread();
- if (mCallback) {
- mCallback->Error(aError);
- }
+ mDecodedData.Clear();
+ mDecodePromise.RejectIfExists(aError, __func__);
+ mDrainPromise.RejectIfExists(aError, __func__);
+ mFlushPromise.RejectIfExists(aError, __func__);
return IPC_OK();
}
mozilla::ipc::IPCResult
-VideoDecoderChild::RecvInitComplete(const bool& aHardware, const nsCString& aHardwareReason)
+VideoDecoderChild::RecvInitComplete(const bool& aHardware,
+ const nsCString& aHardwareReason)
{
AssertOnManagerThread();
mInitPromise.ResolveIfExists(TrackInfo::kVideoTrack, __func__);
mInitialized = true;
mIsHardwareAccelerated = aHardware;
mHardwareAcceleratedReason = aHardwareReason;
return IPC_OK();
}
@@ -107,63 +104,68 @@ VideoDecoderChild::RecvInitFailed(const
AssertOnManagerThread();
mInitPromise.RejectIfExists(aReason, __func__);
return IPC_OK();
}
mozilla::ipc::IPCResult
VideoDecoderChild::RecvFlushComplete()
{
- MOZ_ASSERT(mFlushTask);
- AutoCompleteTask complete(mFlushTask);
- mFlushTask = nullptr;
+ AssertOnManagerThread();
+ mFlushPromise.ResolveIfExists(true, __func__);
return IPC_OK();
}
void
VideoDecoderChild::ActorDestroy(ActorDestroyReason aWhy)
{
if (aWhy == AbnormalShutdown) {
// Defer reporting an error until we've recreated the manager so that
// it'll be safe for MediaFormatReader to recreate decoders
RefPtr<VideoDecoderChild> ref = this;
GetManager()->RunWhenRecreated(NS_NewRunnableFunction([=]() {
- if (ref->mInitialized && ref->mCallback) {
- ref->mCallback->Error(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER);
+ if (ref->mInitialized) {
+ mDecodedData.Clear();
+ mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER,
+ __func__);
+ mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER,
+ __func__);
+ mFlushPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER,
+ __func__);
+ // Make sure the next request will be rejected accordingly if ever
+ // called.
+ mNeedNewDecoder = true;
} else {
- ref->mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER, __func__);
+ ref->mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER,
+ __func__);
}
}));
}
- if (mFlushTask) {
- AutoCompleteTask complete(mFlushTask);
- mFlushTask = nullptr;
- }
mCanSend = false;
}
bool
-VideoDecoderChild::InitIPDL(MediaDataDecoderCallback* aCallback,
- const VideoInfo& aVideoInfo,
+VideoDecoderChild::InitIPDL(const VideoInfo& aVideoInfo,
const layers::TextureFactoryIdentifier& aIdentifier)
{
- RefPtr<VideoDecoderManagerChild> manager = VideoDecoderManagerChild::GetSingleton();
- // If the manager isn't available, then don't initialize mIPDLSelfRef and leave
- // us in an error state. We'll then immediately reject the promise when Init()
- // is called and the caller can try again. Hopefully by then the new manager is
- // ready, or we've notified the caller of it being no longer available.
- // If not, then the cycle repeats until we're ready.
+ RefPtr<VideoDecoderManagerChild> manager =
+ VideoDecoderManagerChild::GetSingleton();
+ // If the manager isn't available, then don't initialize mIPDLSelfRef and
+ // leave us in an error state. We'll then immediately reject the promise when
+ // Init() is called and the caller can try again. Hopefully by then the new
+ // manager is ready, or we've notified the caller of it being no longer
+ // available. If not, then the cycle repeats until we're ready.
if (!manager || !manager->CanSend()) {
return true;
}
mIPDLSelfRef = this;
- mCallback = aCallback;
bool success = false;
- if (manager->SendPVideoDecoderConstructor(this, aVideoInfo, aIdentifier, &success)) {
+ if (manager->SendPVideoDecoderConstructor(this, aVideoInfo, aIdentifier,
+ &success)) {
mCanSend = true;
}
return success;
}
void
VideoDecoderChild::DestroyIPDL()
{
@@ -192,76 +194,93 @@ VideoDecoderChild::Init()
// If we failed to send this, then we'll still resolve the Init promise
// as ActorDestroy handles it.
if (mCanSend) {
SendInit();
}
return mInitPromise.Ensure(__func__);
}
-void
-VideoDecoderChild::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+VideoDecoderChild::Decode(MediaRawData* aSample)
{
AssertOnManagerThread();
+
+ if (mNeedNewDecoder) {
+ return MediaDataDecoder::DecodePromise::CreateAndReject(
+ NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER, __func__);
+ }
if (!mCanSend) {
- return;
+ // We're here if the IPC channel has died but we're still waiting for the
+ // RunWhenRecreated task to complete. The decode promise will be rejected
+ // when that task is run.
+ return mDecodePromise.Ensure(__func__);
}
// TODO: It would be nice to add an allocator method to
// MediaDataDecoder so that the demuxer could write directly
// into shmem rather than requiring a copy here.
Shmem buffer;
if (!AllocShmem(aSample->Size(), Shmem::SharedMemory::TYPE_BASIC, &buffer)) {
- mCallback->Error(NS_ERROR_DOM_MEDIA_DECODE_ERR);
- return;
+ return MediaDataDecoder::DecodePromise::CreateAndReject(
+ NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__);
}
memcpy(buffer.get<uint8_t>(), aSample->Data(), aSample->Size());
MediaRawDataIPDL sample(MediaDataIPDL(aSample->mOffset,
aSample->mTime,
aSample->mTimecode,
aSample->mDuration,
aSample->mFrames,
aSample->mKeyframe),
buffer);
SendInput(sample);
+ return mDecodePromise.Ensure(__func__);
}
-void
-VideoDecoderChild::Flush(SynchronousTask* aTask)
+RefPtr<MediaDataDecoder::FlushPromise>
+VideoDecoderChild::Flush()
{
AssertOnManagerThread();
+ mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+ mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+ if (mNeedNewDecoder) {
+ return MediaDataDecoder::FlushPromise::CreateAndReject(
+ NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER, __func__);
+ }
if (mCanSend) {
SendFlush();
- mFlushTask = aTask;
- } else {
- AutoCompleteTask complete(aTask);
}
+ return mFlushPromise.Ensure(__func__);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
VideoDecoderChild::Drain()
{
AssertOnManagerThread();
+ if (mNeedNewDecoder) {
+ return MediaDataDecoder::DecodePromise::CreateAndReject(
+ NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER, __func__);
+ }
if (mCanSend) {
SendDrain();
}
+ return mDrainPromise.Ensure(__func__);
}
void
VideoDecoderChild::Shutdown()
{
AssertOnManagerThread();
mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
if (mCanSend) {
SendShutdown();
}
mInitialized = false;
- mCallback = nullptr;
}
bool
VideoDecoderChild::IsHardwareAccelerated(nsACString& aFailureReason) const
{
aFailureReason = mHardwareAcceleratedReason;
return mIsHardwareAccelerated;
}
--- a/dom/media/ipc/VideoDecoderChild.h
+++ b/dom/media/ipc/VideoDecoderChild.h
@@ -1,25 +1,20 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=99: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef include_dom_ipc_VideoDecoderChild_h
#define include_dom_ipc_VideoDecoderChild_h
-#include "mozilla/RefPtr.h"
+#include "PlatformDecoderModule.h"
#include "mozilla/dom/PVideoDecoderChild.h"
-#include "MediaData.h"
-#include "PlatformDecoderModule.h"
namespace mozilla {
-namespace layers {
-class SynchronousTask;
-}
namespace dom {
class RemoteVideoDecoder;
class RemoteDecoderModule;
class VideoDecoderManagerChild;
class VideoDecoderChild final : public PVideoDecoderChild
{
@@ -35,50 +30,52 @@ public:
mozilla::ipc::IPCResult RecvError(const nsresult& aError) override;
mozilla::ipc::IPCResult RecvInitComplete(const bool& aHardware, const nsCString& aHardwareReason) override;
mozilla::ipc::IPCResult RecvInitFailed(const nsresult& aReason) override;
mozilla::ipc::IPCResult RecvFlushComplete() override;
void ActorDestroy(ActorDestroyReason aWhy) override;
RefPtr<MediaDataDecoder::InitPromise> Init();
- void Input(MediaRawData* aSample);
- void Flush(layers::SynchronousTask* Task);
- void Drain();
+ RefPtr<MediaDataDecoder::DecodePromise> Decode(MediaRawData* aSample);
+ RefPtr<MediaDataDecoder::DecodePromise> Drain();
+ RefPtr<MediaDataDecoder::FlushPromise> Flush();
void Shutdown();
bool IsHardwareAccelerated(nsACString& aFailureReason) const;
void SetSeekThreshold(const media::TimeUnit& aTime);
MOZ_IS_CLASS_INIT
- bool InitIPDL(MediaDataDecoderCallback* aCallback,
- const VideoInfo& aVideoInfo,
+ bool InitIPDL(const VideoInfo& aVideoInfo,
const layers::TextureFactoryIdentifier& aIdentifier);
void DestroyIPDL();
// Called from IPDL when our actor has been destroyed
void IPDLActorDestroyed();
VideoDecoderManagerChild* GetManager();
private:
~VideoDecoderChild();
void AssertOnManagerThread();
RefPtr<VideoDecoderChild> mIPDLSelfRef;
RefPtr<nsIThread> mThread;
- MediaDataDecoderCallback* mCallback;
-
MozPromiseHolder<MediaDataDecoder::InitPromise> mInitPromise;
-
- layers::SynchronousTask* mFlushTask;
+ MozPromiseHolder<MediaDataDecoder::DecodePromise> mDecodePromise;
+ MozPromiseHolder<MediaDataDecoder::DecodePromise> mDrainPromise;
+ MozPromiseHolder<MediaDataDecoder::FlushPromise> mFlushPromise;
nsCString mHardwareAcceleratedReason;
bool mCanSend;
bool mInitialized;
bool mIsHardwareAccelerated;
+ // Set to true if the actor got destroyed and we haven't yet notified the
+ // caller.
+ bool mNeedNewDecoder;
+ MediaDataDecoder::DecodedData mDecodedData;
};
} // namespace dom
} // namespace mozilla
#endif // include_dom_ipc_VideoDecoderChild_h
--- a/dom/media/ipc/VideoDecoderParent.cpp
+++ b/dom/media/ipc/VideoDecoderParent.cpp
@@ -67,17 +67,16 @@ VideoDecoderParent::VideoDecoderParent(V
// TODO: Ideally we wouldn't hardcode the WMF PDM, and we'd use the normal PDM
// factory logic for picking a decoder.
WMFDecoderModule::Init();
RefPtr<WMFDecoderModule> pdm(new WMFDecoderModule());
pdm->Startup();
CreateDecoderParams params(aVideoInfo);
params.mTaskQueue = mDecodeTaskQueue;
- params.mCallback = this;
params.mKnowsCompositor = mKnowsCompositor;
params.mImageContainer = new layers::ImageContainer();
mDecoder = pdm->CreateVideoDecoder(params);
#else
MOZ_ASSERT(false, "Can't use RemoteVideoDecoder on non-Windows platforms yet");
#endif
@@ -129,47 +128,101 @@ VideoDecoderParent::RecvInput(const Medi
data->mOffset = aData.base().offset();
data->mTime = aData.base().time();
data->mTimecode = aData.base().timecode();
data->mDuration = aData.base().duration();
data->mKeyframe = aData.base().keyframe();
DeallocShmem(aData.buffer());
- mDecoder->Input(data);
+ RefPtr<VideoDecoderParent> self = this;
+ mDecoder->Decode(data)->Then(
+ mManagerTaskQueue, __func__,
+ [self, this](const MediaDataDecoder::DecodedData& aResults) {
+ if (mDestroyed) {
+ return;
+ }
+ ProcessDecodedData(aResults);
+ Unused << SendInputExhausted();
+ },
+ [self, this](const MediaResult& aError) { Error(aError); });
return IPC_OK();
}
+void
+VideoDecoderParent::ProcessDecodedData(
+ const MediaDataDecoder::DecodedData& aData)
+{
+ MOZ_ASSERT(OnManagerThread());
+
+ for (const auto& data : aData) {
+ MOZ_ASSERT(data->mType == MediaData::VIDEO_DATA,
+ "Can only decode videos using VideoDecoderParent!");
+ VideoData* video = static_cast<VideoData*>(data.get());
+
+ MOZ_ASSERT(video->mImage, "Decoded video must output a layer::Image to "
+ "be used with VideoDecoderParent");
+
+ RefPtr<TextureClient> texture =
+ video->mImage->GetTextureClient(mKnowsCompositor);
+
+ if (!texture) {
+ texture = ImageClient::CreateTextureClientForImage(video->mImage,
+ mKnowsCompositor);
+ }
+
+ if (texture && !texture->IsAddedToCompositableClient()) {
+ texture->InitIPDLActor(mKnowsCompositor);
+ texture->SetAddedToCompositableClient();
+ }
+
+ VideoDataIPDL output(
+ MediaDataIPDL(data->mOffset, data->mTime, data->mTimecode,
+ data->mDuration, data->mFrames, data->mKeyframe),
+ video->mDisplay,
+ texture ? mParent->StoreImage(video->mImage, texture)
+ : SurfaceDescriptorGPUVideo(0),
+ video->mFrameID);
+ Unused << SendOutput(output);
+ }
+}
+
mozilla::ipc::IPCResult
VideoDecoderParent::RecvFlush()
{
MOZ_ASSERT(!mDestroyed);
MOZ_ASSERT(OnManagerThread());
- if (mDecoder) {
- mDecoder->Flush();
- }
+ RefPtr<VideoDecoderParent> self = this;
+ mDecoder->Flush()->Then(
+ mManagerTaskQueue, __func__,
+ [self, this]() {
+ if (!mDestroyed) {
+ Unused << SendFlushComplete();
+ }
+ },
+ [self, this](const MediaResult& aError) { Error(aError); });
- // Dispatch a runnable to our own event queue so that
- // it will be processed after anything that got dispatched
- // during the Flush call.
- RefPtr<VideoDecoderParent> self = this;
- mManagerTaskQueue->Dispatch(NS_NewRunnableFunction([self]() {
- if (!self->mDestroyed) {
- Unused << self->SendFlushComplete();
- }
- }));
return IPC_OK();
}
mozilla::ipc::IPCResult
VideoDecoderParent::RecvDrain()
{
MOZ_ASSERT(!mDestroyed);
MOZ_ASSERT(OnManagerThread());
- mDecoder->Drain();
+ RefPtr<VideoDecoderParent> self = this;
+ mDecoder->Drain()->Then(
+ mManagerTaskQueue, __func__,
+ [self, this](const MediaDataDecoder::DecodedData& aResults) {
+ if (!mDestroyed) {
+ ProcessDecodedData(aResults);
+ Unused << SendDrainComplete();
+ }
+ },
+ [self, this](const MediaResult& aError) { Error(aError); });
return IPC_OK();
}
mozilla::ipc::IPCResult
VideoDecoderParent::RecvShutdown()
{
MOZ_ASSERT(!mDestroyed);
MOZ_ASSERT(OnManagerThread());
@@ -199,100 +252,22 @@ VideoDecoderParent::ActorDestroy(ActorDe
mDecoder = nullptr;
}
if (mDecodeTaskQueue) {
mDecodeTaskQueue->BeginShutdown();
}
}
void
-VideoDecoderParent::Output(MediaData* aData)
-{
- MOZ_ASSERT(mDecodeTaskQueue->IsCurrentThreadIn());
- RefPtr<VideoDecoderParent> self = this;
- RefPtr<KnowsCompositor> knowsCompositor = mKnowsCompositor;
- RefPtr<MediaData> data = aData;
- mManagerTaskQueue->Dispatch(NS_NewRunnableFunction([self, knowsCompositor, data]() {
- if (self->mDestroyed) {
- return;
- }
-
- MOZ_ASSERT(data->mType == MediaData::VIDEO_DATA, "Can only decode videos using VideoDecoderParent!");
- VideoData* video = static_cast<VideoData*>(data.get());
-
- MOZ_ASSERT(video->mImage, "Decoded video must output a layer::Image to be used with VideoDecoderParent");
-
- RefPtr<TextureClient> texture = video->mImage->GetTextureClient(knowsCompositor);
-
- if (!texture) {
- texture = ImageClient::CreateTextureClientForImage(video->mImage, knowsCompositor);
- }
-
- if (texture && !texture->IsAddedToCompositableClient()) {
- texture->InitIPDLActor(knowsCompositor);
- texture->SetAddedToCompositableClient();
- }
-
- VideoDataIPDL output(MediaDataIPDL(data->mOffset,
- data->mTime,
- data->mTimecode,
- data->mDuration,
- data->mFrames,
- data->mKeyframe),
- video->mDisplay,
- texture ? self->mParent->StoreImage(video->mImage, texture) : SurfaceDescriptorGPUVideo(0),
- video->mFrameID);
- Unused << self->SendOutput(output);
- }));
-}
-
-void
VideoDecoderParent::Error(const MediaResult& aError)
{
- MOZ_ASSERT(mDecodeTaskQueue->IsCurrentThreadIn());
- RefPtr<VideoDecoderParent> self = this;
- MediaResult error = aError;
- mManagerTaskQueue->Dispatch(NS_NewRunnableFunction([self, error]() {
- if (!self->mDestroyed) {
- Unused << self->SendError(error);
- }
- }));
-}
-
-void
-VideoDecoderParent::InputExhausted()
-{
- MOZ_ASSERT(mDecodeTaskQueue->IsCurrentThreadIn());
- RefPtr<VideoDecoderParent> self = this;
- mManagerTaskQueue->Dispatch(NS_NewRunnableFunction([self]() {
- if (!self->mDestroyed) {
- Unused << self->SendInputExhausted();
- }
- }));
-}
-
-void
-VideoDecoderParent::DrainComplete()
-{
- MOZ_ASSERT(mDecodeTaskQueue->IsCurrentThreadIn());
- RefPtr<VideoDecoderParent> self = this;
- mManagerTaskQueue->Dispatch(NS_NewRunnableFunction([self]() {
- if (!self->mDestroyed) {
- Unused << self->SendDrainComplete();
- }
- }));
-}
-
-bool
-VideoDecoderParent::OnReaderTaskQueue()
-{
- // Most of our calls into mDecoder come directly from IPDL so are on
- // the right thread, but not actually on the task queue. We only ever
- // run a single thread, not a pool, so this should work fine.
- return OnManagerThread();
+ MOZ_ASSERT(OnManagerThread());
+ if (!mDestroyed) {
+ Unused << SendError(aError);
+ }
}
bool
VideoDecoderParent::OnManagerThread()
{
return mParent->OnManagerThread();
}
--- a/dom/media/ipc/VideoDecoderParent.h
+++ b/dom/media/ipc/VideoDecoderParent.h
@@ -1,30 +1,30 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=99: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef include_dom_ipc_VideoDecoderParent_h
#define include_dom_ipc_VideoDecoderParent_h
-#include "mozilla/RefPtr.h"
+#include "ImageContainer.h"
+#include "MediaData.h"
+#include "PlatformDecoderModule.h"
+#include "VideoDecoderManagerParent.h"
+#include "mozilla/MozPromise.h"
#include "mozilla/dom/PVideoDecoderParent.h"
#include "mozilla/layers/TextureForwarder.h"
-#include "VideoDecoderManagerParent.h"
-#include "MediaData.h"
-#include "ImageContainer.h"
namespace mozilla {
namespace dom {
class KnowsCompositorVideo;
-class VideoDecoderParent final : public PVideoDecoderParent,
- public MediaDataDecoderCallback
+class VideoDecoderParent final : public PVideoDecoderParent
{
public:
// We refcount this class since the task queue can have runnables
// that reference us.
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoDecoderParent)
VideoDecoderParent(VideoDecoderManagerParent* aParent,
const VideoInfo& aVideoInfo,
@@ -40,27 +40,22 @@ public:
mozilla::ipc::IPCResult RecvInput(const MediaRawDataIPDL& aData) override;
mozilla::ipc::IPCResult RecvFlush() override;
mozilla::ipc::IPCResult RecvDrain() override;
mozilla::ipc::IPCResult RecvShutdown() override;
mozilla::ipc::IPCResult RecvSetSeekThreshold(const int64_t& aTime) override;
void ActorDestroy(ActorDestroyReason aWhy) override;
- // MediaDataDecoderCallback
- void Output(MediaData* aData) override;
- void Error(const MediaResult& aError) override;
- void InputExhausted() override;
- void DrainComplete() override;
- bool OnReaderTaskQueue() override;
-
private:
bool OnManagerThread();
+ void Error(const MediaResult& aError);
~VideoDecoderParent();
+ void ProcessDecodedData(const MediaDataDecoder::DecodedData& aData);
RefPtr<VideoDecoderManagerParent> mParent;
RefPtr<VideoDecoderParent> mIPDLSelfRef;
RefPtr<TaskQueue> mManagerTaskQueue;
RefPtr<TaskQueue> mDecodeTaskQueue;
RefPtr<MediaDataDecoder> mDecoder;
RefPtr<KnowsCompositorVideo> mKnowsCompositor;
--- a/dom/media/platforms/PlatformDecoderModule.h
+++ b/dom/media/platforms/PlatformDecoderModule.h
@@ -29,26 +29,27 @@ namespace layers {
class ImageContainer;
} // namespace layers
namespace dom {
class RemoteDecoderModule;
}
class MediaDataDecoder;
-class MediaDataDecoderCallback;
class TaskQueue;
class CDMProxy;
static LazyLogModule sPDMLog("PlatformDecoderModule");
-struct MOZ_STACK_CLASS CreateDecoderParams final {
+struct MOZ_STACK_CLASS CreateDecoderParams final
+{
explicit CreateDecoderParams(const TrackInfo& aConfig)
: mConfig(aConfig)
- {}
+ {
+ }
template <typename T1, typename... Ts>
CreateDecoderParams(const TrackInfo& aConfig, T1&& a1, Ts&&... args)
: mConfig(aConfig)
{
Set(mozilla::Forward<T1>(a1), mozilla::Forward<Ts>(args)...);
}
@@ -69,33 +70,40 @@ struct MOZ_STACK_CLASS CreateDecoderPara
if (mKnowsCompositor) {
return mKnowsCompositor->GetCompositorBackendType();
}
return layers::LayersBackend::LAYERS_NONE;
}
const TrackInfo& mConfig;
TaskQueue* mTaskQueue = nullptr;
- MediaDataDecoderCallback* mCallback = nullptr;
DecoderDoctorDiagnostics* mDiagnostics = nullptr;
layers::ImageContainer* mImageContainer = nullptr;
MediaResult* mError = nullptr;
RefPtr<layers::KnowsCompositor> mKnowsCompositor;
RefPtr<GMPCrashHelper> mCrashHelper;
bool mUseBlankDecoder = false;
private:
void Set(TaskQueue* aTaskQueue) { mTaskQueue = aTaskQueue; }
- void Set(MediaDataDecoderCallback* aCallback) { mCallback = aCallback; }
- void Set(DecoderDoctorDiagnostics* aDiagnostics) { mDiagnostics = aDiagnostics; }
- void Set(layers::ImageContainer* aImageContainer) { mImageContainer = aImageContainer; }
+ void Set(DecoderDoctorDiagnostics* aDiagnostics)
+ {
+ mDiagnostics = aDiagnostics;
+ }
+ void Set(layers::ImageContainer* aImageContainer)
+ {
+ mImageContainer = aImageContainer;
+ }
void Set(MediaResult* aError) { mError = aError; }
void Set(GMPCrashHelper* aCrashHelper) { mCrashHelper = aCrashHelper; }
void Set(bool aUseBlankDecoder) { mUseBlankDecoder = aUseBlankDecoder; }
- void Set(layers::KnowsCompositor* aKnowsCompositor) { mKnowsCompositor = aKnowsCompositor; }
+ void Set(layers::KnowsCompositor* aKnowsCompositor)
+ {
+ mKnowsCompositor = aKnowsCompositor;
+ }
template <typename T1, typename T2, typename... Ts>
void Set(T1&& a1, T2&& a2, Ts&&... args)
{
Set(mozilla::Forward<T1>(a1));
Set(mozilla::Forward<T2>(a2), mozilla::Forward<Ts>(args)...);
}
};
@@ -108,17 +116,18 @@ private:
//
// Decoding is asynchronous, and should be performed on the task queue
// provided if the underlying platform isn't already exposing an async API.
//
// A cross-platform decoder module that discards input and produces "blank"
// output samples exists for testing, and is created when the pref
// "media.use-blank-decoder" is true.
-class PlatformDecoderModule {
+class PlatformDecoderModule
+{
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PlatformDecoderModule)
// Perform any per-instance initialization.
// This is called on the decode task queue.
virtual nsresult Startup() { return NS_OK; };
// Indicates if the PlatformDecoderModule supports decoding of aMimeType.
@@ -127,17 +136,18 @@ public:
virtual bool Supports(const TrackInfo& aTrackInfo,
DecoderDoctorDiagnostics* aDiagnostics) const
{
// By default, fall back to SupportsMimeType with just the MIME string.
// (So PDMs do not need to override this method -- yet.)
return SupportsMimeType(aTrackInfo.mMimeType, aDiagnostics);
}
- enum class ConversionRequired : uint8_t {
+ enum class ConversionRequired : uint8_t
+ {
kNeedNone,
kNeedAVCC,
kNeedAnnexB,
};
// Indicates that the decoder requires a specific format.
// The PlatformDecoderModule will convert the demuxed data accordingly before
// feeding it to MediaDataDecoder::Input.
@@ -152,67 +162,37 @@ protected:
friend class dom::RemoteDecoderModule;
friend class EMEDecoderModule;
// Creates a Video decoder. The layers backend is passed in so that
// decoders can determine whether hardware accelerated decoding can be used.
// Asynchronous decoding of video should be done in runnables dispatched
// to aVideoTaskQueue. If the task queue isn't needed, the decoder should
// not hold a reference to it.
- // Output and errors should be returned to the reader via aCallback.
// On Windows the task queue's threads in have MSCOM initialized with
// COINIT_MULTITHREADED.
// Returns nullptr if the decoder can't be created.
// It is safe to store a reference to aConfig.
// This is called on the decode task queue.
virtual already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const CreateDecoderParams& aParams) = 0;
// Creates an Audio decoder with the specified properties.
// Asynchronous decoding of audio should be done in runnables dispatched to
// aAudioTaskQueue. If the task queue isn't needed, the decoder should
// not hold a reference to it.
- // Output and errors should be returned to the reader via aCallback.
// Returns nullptr if the decoder can't be created.
// On Windows the task queue's threads in have MSCOM initialized with
// COINIT_MULTITHREADED.
// It is safe to store a reference to aConfig.
// This is called on the decode task queue.
virtual already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const CreateDecoderParams& aParams) = 0;
};
-// A callback used by MediaDataDecoder to return output/errors to the
-// MediaFormatReader.
-// Implementation is threadsafe, and can be called on any thread.
-class MediaDataDecoderCallback {
-public:
- virtual ~MediaDataDecoderCallback() {}
-
- // Called by MediaDataDecoder when a sample has been decoded.
- virtual void Output(MediaData* aData) = 0;
-
- // Denotes an error in the decoding process. The reader will stop calling
- // the decoder.
- virtual void Error(const MediaResult& aError) = 0;
-
- // Denotes that the last input sample has been inserted into the decoder,
- // and no more output can be produced unless more input is sent.
- // A frame decoding session is completed once InputExhausted has been called.
- // MediaDataDecoder::Input will not be called again until InputExhausted has
- // been called.
- virtual void InputExhausted() = 0;
-
- virtual void DrainComplete() = 0;
-
- virtual void ReleaseMediaResources() {}
-
- virtual bool OnReaderTaskQueue() = 0;
-};
-
// MediaDataDecoder is the interface exposed by decoders created by the
// PlatformDecoderModule's Create*Decoder() functions. The type of
// media data that the decoder accepts as valid input and produces as
// output is determined when the MediaDataDecoder is created.
//
// Unless otherwise noted, all functions are only called on the decode task
// queue. An exception is the MediaDataDecoder in
// MediaFormatReader::IsVideoAccelerated() for which all calls (Init(),
@@ -220,74 +200,74 @@ public:
//
// Don't block inside these functions, unless it's explicitly noted that you
// should (like in Flush()).
//
// Decoding is done asynchronously. Any async work can be done on the
// TaskQueue passed into the PlatformDecoderModules's Create*Decoder()
// function. This may not be necessary for platforms with async APIs
// for decoding.
-//
-// If an error occurs at any point after the Init promise has been
-// completed, then Error() must be called on the associated
-// MediaDataDecoderCallback.
-class MediaDataDecoder {
+class MediaDataDecoder
+{
protected:
virtual ~MediaDataDecoder() {};
public:
typedef TrackInfo::TrackType TrackType;
- typedef MozPromise<TrackType, MediaResult, /* IsExclusive = */ true> InitPromise;
+ typedef nsTArray<RefPtr<MediaData>> DecodedData;
+ typedef MozPromise<TrackType, MediaResult, /* IsExclusive = */ true>
+ InitPromise;
+ typedef MozPromise<DecodedData, MediaResult, /* IsExclusive = */ true>
+ DecodePromise;
+ typedef MozPromise<bool, MediaResult, /* IsExclusive = */ true> FlushPromise;
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDataDecoder)
// Initialize the decoder. The decoder should be ready to decode once
// promise resolves. The decoder should do any initialization here, rather
// than in its constructor or PlatformDecoderModule::Create*Decoder(),
// so that if the MediaFormatReader needs to shutdown during initialization,
// it can call Shutdown() to cancel this operation. Any initialization
// that requires blocking the calling thread in this function *must*
// be done here so that it can be canceled by calling Shutdown()!
virtual RefPtr<InitPromise> Init() = 0;
- // Inserts a sample into the decoder's decode pipeline.
- virtual void Input(MediaRawData* aSample) = 0;
-
- // Causes all samples in the decoding pipeline to be discarded. When
- // this function returns, the decoder must be ready to accept new input
- // for decoding. This function is called when the demuxer seeks, before
- // decoding resumes after the seek.
- // While the reader calls Flush(), it ignores all output sent to it;
- // it is safe (but pointless) to send output while Flush is called.
- // The MediaFormatReader will not call Input() while it's calling Flush().
- virtual void Flush() = 0;
+ // Inserts a sample into the decoder's decode pipeline. The DecodePromise will
+ // be resolved with the decoded MediaData. In case the decoder needs more
+ // input, the DecodePromise may be resolved with an empty array of samples to
+ // indicate that Decode should be called again before a MediaData is returned.
+ virtual RefPtr<DecodePromise> Decode(MediaRawData* aSample) = 0;
// Causes all complete samples in the pipeline that can be decoded to be
// output. If the decoder can't produce samples from the current output,
// it drops the input samples. The decoder may be holding onto samples
// that are required to decode samples that it expects to get in future.
// This is called when the demuxer reaches end of stream.
- // The MediaFormatReader will not call Input() while it's calling Drain().
- // This function is asynchronous. The MediaDataDecoder must call
- // MediaDataDecoderCallback::DrainComplete() once all remaining
- // samples have been output.
- virtual void Drain() = 0;
+ // This function is asynchronous. The MediaDataDecoder shall resolve the
+ // pending DecodePromise with all drained samples.
+ virtual RefPtr<DecodePromise> Drain() = 0;
+
+ // Causes all samples in the decoding pipeline to be discarded. When this
+ // promise resolves, the decoder must be ready to accept new data for
+ // decoding. This function is called when the demuxer seeks, before decoding
+ // resumes after the seek. The current DecodePromise, if any, shall be
+ // rejected with NS_ERROR_DOM_MEDIA_CANCELED.
+ virtual RefPtr<FlushPromise> Flush() = 0;
- // Cancels all init/input/drain operations, and shuts down the
- // decoder. The platform decoder should clean up any resources it's using
- // and release memory etc. Shutdown() must block until the decoder has
- // completed shutdown. The reader calls Flush() before calling Shutdown().
- // The reader will delete the decoder once Shutdown() returns.
- // The MediaDataDecoderCallback *must* not be called after Shutdown() has
- // returned.
- virtual void Shutdown() = 0;
+ // Cancels all init/decode/drain operations, and shuts down the decoder. The
+ // platform decoder should clean up any resources it's using and release
+ // memory etc. The shutdown promise will be resolved once the decoder has
+ // completed shutdown. The reader calls Flush() before calling Shutdown(). The
+ // reader will delete the decoder once the promise is resolved.
+ // The ShutdownPromise must only ever be resolved.
+ virtual RefPtr<ShutdownPromise> Shutdown() = 0;
- // Called from the state machine task queue or main thread.
- // Decoder needs to decide whether or not hardware accelearation is supported
- // after creating. It doesn't need to call Init() before calling this function.
+ // Called from the state machine task queue or main thread. Decoder needs to
+ // decide whether or not hardware acceleration is supported after creation.
+ // It doesn't need to call Init() before calling this function.
virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const { return false; }
// Return the name of the MediaDataDecoder, only used for decoding.
// Only return a static const string, as the information may be accessed
// in a non thread-safe fashion.
virtual const char* GetDescriptionName() const = 0;
// Set a hint of seek target time to decoder. Decoder will drop any decoded
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -27,82 +27,78 @@ namespace mozilla {
// MediaData objects.
template<class BlankMediaDataCreator>
class BlankMediaDataDecoder : public MediaDataDecoder {
public:
BlankMediaDataDecoder(BlankMediaDataCreator* aCreator,
const CreateDecoderParams& aParams)
: mCreator(aCreator)
- , mCallback(aParams.mCallback)
, mMaxRefFrames(aParams.mConfig.GetType() == TrackInfo::kVideoTrack &&
MP4Decoder::IsH264(aParams.mConfig.mMimeType)
? mp4_demuxer::AnnexB::HasSPS(aParams.VideoConfig().mExtraData)
? mp4_demuxer::H264::ComputeMaxRefFrames(aParams.VideoConfig().mExtraData)
: 16
: 0)
, mType(aParams.mConfig.GetType())
{
}
RefPtr<InitPromise> Init() override {
return InitPromise::CreateAndResolve(mType, __func__);
}
- void Shutdown() override {}
+ RefPtr<ShutdownPromise> Shutdown() override
+ {
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+ }
- void Input(MediaRawData* aSample) override
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override
{
RefPtr<MediaData> data =
mCreator->Create(media::TimeUnit::FromMicroseconds(aSample->mTime),
media::TimeUnit::FromMicroseconds(aSample->mDuration),
aSample->mOffset);
- OutputFrame(data);
- }
+ if (!data) {
+ return DecodePromise::CreateAndReject(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
- void Flush() override
- {
- mReorderQueue.Clear();
+ // Frames come out in DTS order but we need to output them in PTS order.
+ mReorderQueue.Push(data);
+
+ if (mReorderQueue.Length() > mMaxRefFrames) {
+ return DecodePromise::CreateAndResolve(
+ DecodedData{ mReorderQueue.Pop().get() }, __func__);
+ }
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
- void Drain() override
+ RefPtr<DecodePromise> Drain() override
{
+ DecodedData samples;
while (!mReorderQueue.IsEmpty()) {
- mCallback->Output(mReorderQueue.Pop().get());
+ samples.AppendElement(mReorderQueue.Pop().get());
}
+ return DecodePromise::CreateAndResolve(samples, __func__);
+ }
- mCallback->DrainComplete();
+ RefPtr<FlushPromise> Flush() override
+ {
+ mReorderQueue.Clear();
+ return FlushPromise::CreateAndResolve(true, __func__);
}
const char* GetDescriptionName() const override
{
return "blank media data decoder";
}
private:
- void OutputFrame(MediaData* aData)
- {
- if (!aData) {
- mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
- return;
- }
-
- // Frames come out in DTS order but we need to output them in PTS order.
- mReorderQueue.Push(aData);
-
- while (mReorderQueue.Length() > mMaxRefFrames) {
- mCallback->Output(mReorderQueue.Pop().get());
- }
- mCallback->InputExhausted();
- }
-
-private:
nsAutoPtr<BlankMediaDataCreator> mCreator;
- MediaDataDecoderCallback* mCallback;
const uint32_t mMaxRefFrames;
ReorderQueue mReorderQueue;
TrackInfo::TrackType mType;
};
class BlankVideoDataCreator {
public:
BlankVideoDataCreator(uint32_t aFrameWidth,
--- a/dom/media/platforms/agnostic/OpusDecoder.cpp
+++ b/dom/media/platforms/agnostic/OpusDecoder.cpp
@@ -23,37 +23,39 @@ extern "C" {
#define OPUS_DEBUG(arg, ...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, \
("OpusDataDecoder(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
namespace mozilla {
OpusDataDecoder::OpusDataDecoder(const CreateDecoderParams& aParams)
: mInfo(aParams.AudioConfig())
, mTaskQueue(aParams.mTaskQueue)
- , mCallback(aParams.mCallback)
, mOpusDecoder(nullptr)
, mSkip(0)
, mDecodedHeader(false)
, mPaddingDiscarded(false)
, mFrames(0)
- , mIsFlushing(false)
{
}
OpusDataDecoder::~OpusDataDecoder()
{
if (mOpusDecoder) {
opus_multistream_decoder_destroy(mOpusDecoder);
mOpusDecoder = nullptr;
}
}
-void
+RefPtr<ShutdownPromise>
OpusDataDecoder::Shutdown()
{
+ RefPtr<OpusDataDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self]() {
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+ });
}
void
OpusDataDecoder::AppendCodecDelay(MediaByteBuffer* config, uint64_t codecDelayUS)
{
uint8_t buffer[sizeof(uint64_t)];
BigEndian::writeUint64(buffer, codecDelayUS);
config->AppendElements(buffer, sizeof(uint64_t));
@@ -137,99 +139,91 @@ OpusDataDecoder::DecodeHeader(const unsi
// Should never get here as vorbis layout is always convertible to SMPTE
// default layout.
PodCopy(mMappingTable, mOpusParser->mMappingTable, MAX_AUDIO_CHANNELS);
}
return NS_OK;
}
-void
-OpusDataDecoder::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+OpusDataDecoder::Decode(MediaRawData* aSample)
{
- mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
- this, &OpusDataDecoder::ProcessDecode, aSample));
+ return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
+ &OpusDataDecoder::ProcessDecode, aSample);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
OpusDataDecoder::ProcessDecode(MediaRawData* aSample)
{
- if (mIsFlushing) {
- return;
- }
-
- MediaResult rv = DoDecode(aSample);
- if (NS_FAILED(rv)) {
- mCallback->Error(rv);
- return;
- }
- mCallback->InputExhausted();
-}
-
-MediaResult
-OpusDataDecoder::DoDecode(MediaRawData* aSample)
-{
uint32_t channels = mOpusParser->mChannels;
if (mPaddingDiscarded) {
// Discard padding should be used only on the final packet, so
// decoding after a padding discard is invalid.
OPUS_DEBUG("Opus error, discard padding on interstitial packet");
- return MediaResult(
- NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Discard padding on interstitial packet"));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Discard padding on interstitial packet")),
+ __func__);
}
if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
// We are starting a new block.
mFrames = 0;
mLastFrameTime = Some(aSample->mTime);
}
// Maximum value is 63*2880, so there's no chance of overflow.
uint32_t frames_number = opus_packet_get_nb_frames(aSample->Data(),
aSample->Size());
if (frames_number <= 0) {
- OPUS_DEBUG("Invalid packet header: r=%ld length=%ld",
- frames_number, aSample->Size());
- return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("Invalid packet header: r=%d length=%u",
- frames_number, uint32_t(aSample->Size())));
+ OPUS_DEBUG("Invalid packet header: r=%ld length=%ld", frames_number,
+ aSample->Size());
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Invalid packet header: r=%d length=%u",
+ frames_number, uint32_t(aSample->Size()))),
+ __func__);
}
uint32_t samples = opus_packet_get_samples_per_frame(
aSample->Data(), opus_int32(mOpusParser->mRate));
-
// A valid Opus packet must be between 2.5 and 120 ms long (48kHz).
uint32_t frames = frames_number*samples;
if (frames < 120 || frames > 5760) {
OPUS_DEBUG("Invalid packet frames: %u", frames);
- return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("Invalid packet frames:%u", frames));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Invalid packet frames:%u", frames)),
+ __func__);
}
AlignedAudioBuffer buffer(frames * channels);
if (!buffer) {
- return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
}
// Decode to the appropriate sample type.
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
int ret = opus_multistream_decode_float(mOpusDecoder,
aSample->Data(), aSample->Size(),
buffer.get(), frames, false);
#else
int ret = opus_multistream_decode(mOpusDecoder,
aSample->Data(), aSample->Size(),
buffer.get(), frames, false);
#endif
if (ret < 0) {
- return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("Opus decoding error:%d", ret));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Opus decoding error:%d", ret)),
+ __func__);
}
NS_ASSERTION(uint32_t(ret) == frames, "Opus decoded too few audio samples");
CheckedInt64 startTime = aSample->mTime;
// Trim the initial frames while the decoder is settling.
if (mSkip > 0) {
int32_t skipFrames = std::min<int32_t>(mSkip, frames);
int32_t keepFrames = frames - skipFrames;
@@ -244,20 +238,22 @@ OpusDataDecoder::DoDecode(MediaRawData*
if (aSample->mDiscardPadding > 0) {
OPUS_DEBUG("Opus decoder discarding %u of %u frames",
aSample->mDiscardPadding, frames);
// Padding discard is only supposed to happen on the final packet.
// Record the discard so we can return an error if another packet is
// decoded.
if (aSample->mDiscardPadding > frames) {
- // Discarding more than the entire packet is invalid.
+ // Discarding more than the entire packet is invalid.
OPUS_DEBUG("Opus error, discard padding larger than packet");
- return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Discard padding larger than packet"));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Discard padding larger than packet")),
+ __func__);
}
mPaddingDiscarded = true;
frames = frames - aSample->mDiscardPadding;
}
// Apply the header gain if one was specified.
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
@@ -276,69 +272,69 @@ OpusDataDecoder::DoDecode(MediaRawData*
int32_t val = static_cast<int32_t>((gain_Q16*buffer[i] + 32768)>>16);
buffer[i] = static_cast<AudioDataValue>(MOZ_CLIP_TO_15(val));
}
}
#endif
CheckedInt64 duration = FramesToUsecs(frames, mOpusParser->mRate);
if (!duration.isValid()) {
- return MediaResult(
- NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
- RESULT_DETAIL("Overflow converting WebM audio duration"));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Overflow converting WebM audio duration")),
+ __func__);
}
- CheckedInt64 time =
- startTime - FramesToUsecs(mOpusParser->mPreSkip, mOpusParser->mRate) +
- FramesToUsecs(mFrames, mOpusParser->mRate);
+ CheckedInt64 time = startTime -
+ FramesToUsecs(mOpusParser->mPreSkip, mOpusParser->mRate) +
+ FramesToUsecs(mFrames, mOpusParser->mRate);
if (!time.isValid()) {
- return MediaResult(
- NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
- RESULT_DETAIL("Overflow shifting tstamp by codec delay"));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Overflow shifting tstamp by codec delay")),
+ __func__);
};
- mCallback->Output(new AudioData(aSample->mOffset,
- time.value(),
- duration.value(),
- frames,
- Move(buffer),
- mOpusParser->mChannels,
- mOpusParser->mRate));
+
mFrames += frames;
- return NS_OK;
+
+ return DecodePromise::CreateAndResolve(
+ DecodedData{ new AudioData(aSample->mOffset, time.value(), duration.value(),
+ frames, Move(buffer), mOpusParser->mChannels,
+ mOpusParser->mRate) },
+ __func__);
}
-void
-OpusDataDecoder::ProcessDrain()
+RefPtr<MediaDataDecoder::DecodePromise>
+OpusDataDecoder::Drain()
{
- mCallback->DrainComplete();
+ RefPtr<OpusDataDecoder> self = this;
+ // InvokeAsync dispatches a task that will be run after any pending decode
+ // completes. As such, once the drain task runs, there's nothing more to do.
+ return InvokeAsync(mTaskQueue, __func__, [self] {
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+ });
}
-void
-OpusDataDecoder::Drain()
-{
- mTaskQueue->Dispatch(NewRunnableMethod(this, &OpusDataDecoder::ProcessDrain));
-}
-
-void
+RefPtr<MediaDataDecoder::FlushPromise>
OpusDataDecoder::Flush()
{
if (!mOpusDecoder) {
- return;
+ return FlushPromise::CreateAndResolve(true, __func__);
}
- mIsFlushing = true;
- nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction([this] () {
+
+ RefPtr<OpusDataDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self, this]() {
MOZ_ASSERT(mOpusDecoder);
// Reset the decoder.
opus_multistream_decoder_ctl(mOpusDecoder, OPUS_RESET_STATE);
mSkip = mOpusParser->mPreSkip;
mPaddingDiscarded = false;
mLastFrameTime.reset();
+ return FlushPromise::CreateAndResolve(true, __func__);
});
- SyncRunnable::DispatchToThread(mTaskQueue, runnable);
- mIsFlushing = false;
}
/* static */
bool
OpusDataDecoder::IsOpus(const nsACString& aMimeType)
{
return aMimeType.EqualsLiteral("audio/opus");
}
--- a/dom/media/platforms/agnostic/OpusDecoder.h
+++ b/dom/media/platforms/agnostic/OpusDecoder.h
@@ -19,20 +19,20 @@ class OpusParser;
class OpusDataDecoder : public MediaDataDecoder
{
public:
explicit OpusDataDecoder(const CreateDecoderParams& aParams);
~OpusDataDecoder();
RefPtr<InitPromise> Init() override;
- void Input(MediaRawData* aSample) override;
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "opus audio decoder";
}
// Return true if mimetype is Opus
static bool IsOpus(const nsACString& aMimeType);
@@ -41,36 +41,31 @@ public:
// from the container (if any) and to precede the OpusHead
// block in the CodecSpecificConfig buffer to verify the
// values match.
static void AppendCodecDelay(MediaByteBuffer* config, uint64_t codecDelayUS);
private:
nsresult DecodeHeader(const unsigned char* aData, size_t aLength);
- void ProcessDecode(MediaRawData* aSample);
- MediaResult DoDecode(MediaRawData* aSample);
- void ProcessDrain();
+ RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
const AudioInfo& mInfo;
const RefPtr<TaskQueue> mTaskQueue;
- MediaDataDecoderCallback* mCallback;
// Opus decoder state
nsAutoPtr<OpusParser> mOpusParser;
OpusMSDecoder* mOpusDecoder;
uint16_t mSkip; // Samples left to trim before playback.
bool mDecodedHeader;
// Opus padding should only be discarded on the final packet. Once this
// is set to true, if the reader attempts to decode any further packets it
// will raise an error so we can indicate that the file is invalid.
bool mPaddingDiscarded;
int64_t mFrames;
Maybe<int64_t> mLastFrameTime;
uint8_t mMappingTable[MAX_AUDIO_CHANNELS]; // Channel mapping table.
-
- Atomic<bool> mIsFlushing;
};
} // namespace mozilla
#endif
--- a/dom/media/platforms/agnostic/TheoraDecoder.cpp
+++ b/dom/media/platforms/agnostic/TheoraDecoder.cpp
@@ -35,41 +35,43 @@ ogg_packet InitTheoraPacket(const unsign
packet.granulepos = aGranulepos;
packet.packetno = aPacketNo;
return packet;
}
TheoraDecoder::TheoraDecoder(const CreateDecoderParams& aParams)
: mImageContainer(aParams.mImageContainer)
, mTaskQueue(aParams.mTaskQueue)
- , mCallback(aParams.mCallback)
- , mIsFlushing(false)
, mTheoraSetupInfo(nullptr)
, mTheoraDecoderContext(nullptr)
, mPacketCount(0)
, mInfo(aParams.VideoConfig())
{
MOZ_COUNT_CTOR(TheoraDecoder);
}
TheoraDecoder::~TheoraDecoder()
{
MOZ_COUNT_DTOR(TheoraDecoder);
th_setup_free(mTheoraSetupInfo);
th_comment_clear(&mTheoraComment);
th_info_clear(&mTheoraInfo);
}
-void
+RefPtr<ShutdownPromise>
TheoraDecoder::Shutdown()
{
- if (mTheoraDecoderContext) {
- th_decode_free(mTheoraDecoderContext);
- mTheoraDecoderContext = nullptr;
- }
+ RefPtr<TheoraDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self, this]() {
+ if (mTheoraDecoderContext) {
+ th_decode_free(mTheoraDecoderContext);
+ mTheoraDecoderContext = nullptr;
+ }
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+ });
}
RefPtr<MediaDataDecoder::InitPromise>
TheoraDecoder::Init()
{
th_comment_init(&mTheoraComment);
th_info_init(&mTheoraInfo);
@@ -93,43 +95,39 @@ TheoraDecoder::Init()
if (mTheoraDecoderContext) {
return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
} else {
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
TheoraDecoder::Flush()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- mIsFlushing = true;
- nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([this] () {
- // nothing to do for now.
+ return InvokeAsync(mTaskQueue, __func__, []() {
+ return FlushPromise::CreateAndResolve(true, __func__);
});
- SyncRunnable::DispatchToThread(mTaskQueue, r);
- mIsFlushing = false;
}
nsresult
TheoraDecoder::DoDecodeHeader(const unsigned char* aData, size_t aLength)
{
bool bos = mPacketCount == 0;
ogg_packet pkt = InitTheoraPacket(aData, aLength, bos, false, 0, mPacketCount++);
int r = th_decode_headerin(&mTheoraInfo,
&mTheoraComment,
&mTheoraSetupInfo,
&pkt);
return r > 0 ? NS_OK : NS_ERROR_FAILURE;
}
-MediaResult
-TheoraDecoder::DoDecode(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+TheoraDecoder::ProcessDecode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
const unsigned char* aData = aSample->Data();
size_t aLength = aSample->Size();
bool bos = mPacketCount == 0;
ogg_packet pkt = InitTheoraPacket(aData, aLength, bos, false, aSample->mTimecode, mPacketCount++);
@@ -176,62 +174,43 @@ TheoraDecoder::DoDecode(MediaRawData* aS
aSample->mKeyframe,
aSample->mTimecode,
mInfo.ScaledImageRect(mTheoraInfo.frame_width,
mTheoraInfo.frame_height));
if (!v) {
LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
mTheoraInfo.frame_width, mTheoraInfo.frame_height, mInfo.mDisplay.width, mInfo.mDisplay.height,
mInfo.mImage.width, mInfo.mImage.height);
- return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("Insufficient memory")),
+ __func__);
}
- mCallback->Output(v);
- return NS_OK;
- } else {
- LOG("Theora Decode error: %d", ret);
- return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("Theora decode error:%d", ret));
+ return DecodePromise::CreateAndResolve(DecodedData{ v }, __func__);
}
+ LOG("Theora Decode error: %d", ret);
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Theora decode error:%d", ret)),
+ __func__);
}
-void
-TheoraDecoder::ProcessDecode(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+TheoraDecoder::Decode(MediaRawData* aSample)
{
- MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
- if (mIsFlushing) {
- return;
- }
- MediaResult rv = DoDecode(aSample);
- if (NS_FAILED(rv)) {
- mCallback->Error(rv);
- } else {
- mCallback->InputExhausted();
- }
+ return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
+ &TheoraDecoder::ProcessDecode, aSample);
}
-void
-TheoraDecoder::Input(MediaRawData* aSample)
-{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
- this, &TheoraDecoder::ProcessDecode, aSample));
-}
-
-void
-TheoraDecoder::ProcessDrain()
-{
- MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
- mCallback->DrainComplete();
-}
-
-void
+RefPtr<MediaDataDecoder::DecodePromise>
TheoraDecoder::Drain()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- mTaskQueue->Dispatch(NewRunnableMethod(this, &TheoraDecoder::ProcessDrain));
+ return InvokeAsync(mTaskQueue, __func__, [] {
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+ });
}
/* static */
bool
TheoraDecoder::IsTheora(const nsACString& aMimeType)
{
return aMimeType.EqualsLiteral("video/theora");
}
--- a/dom/media/platforms/agnostic/TheoraDecoder.h
+++ b/dom/media/platforms/agnostic/TheoraDecoder.h
@@ -19,40 +19,36 @@ namespace mozilla {
class TheoraDecoder : public MediaDataDecoder
{
public:
explicit TheoraDecoder(const CreateDecoderParams& aParams);
~TheoraDecoder();
RefPtr<InitPromise> Init() override;
- void Input(MediaRawData* aSample) override;
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
// Return true if mimetype is a Theora codec
static bool IsTheora(const nsACString& aMimeType);
const char* GetDescriptionName() const override
{
return "theora video decoder";
}
private:
nsresult DoDecodeHeader(const unsigned char* aData, size_t aLength);
- void ProcessDecode(MediaRawData* aSample);
- MediaResult DoDecode(MediaRawData* aSample);
- void ProcessDrain();
+ RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
RefPtr<ImageContainer> mImageContainer;
RefPtr<TaskQueue> mTaskQueue;
- MediaDataDecoderCallback* mCallback;
- Atomic<bool> mIsFlushing;
// Theora header & decoder state
th_info mTheoraInfo;
th_comment mTheoraComment;
th_setup_info *mTheoraSetupInfo;
th_dec_ctx *mTheoraDecoderContext;
int mPacketCount;
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -64,36 +64,38 @@ InitContext(vpx_codec_ctx_t* aCtx,
return NS_ERROR_FAILURE;
}
return NS_OK;
}
VPXDecoder::VPXDecoder(const CreateDecoderParams& aParams)
: mImageContainer(aParams.mImageContainer)
, mTaskQueue(aParams.mTaskQueue)
- , mCallback(aParams.mCallback)
- , mIsFlushing(false)
, mInfo(aParams.VideoConfig())
, mCodec(MimeTypeToCodec(aParams.VideoConfig().mMimeType))
{
MOZ_COUNT_CTOR(VPXDecoder);
PodZero(&mVPX);
PodZero(&mVPXAlpha);
}
VPXDecoder::~VPXDecoder()
{
MOZ_COUNT_DTOR(VPXDecoder);
}
-void
+RefPtr<ShutdownPromise>
VPXDecoder::Shutdown()
{
- vpx_codec_destroy(&mVPX);
- vpx_codec_destroy(&mVPXAlpha);
+ RefPtr<VPXDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self, this]() {
+ vpx_codec_destroy(&mVPX);
+ vpx_codec_destroy(&mVPXAlpha);
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+ });
}
RefPtr<MediaDataDecoder::InitPromise>
VPXDecoder::Init()
{
if (NS_FAILED(InitContext(&mVPX, mInfo, mCodec))) {
return VPXDecoder::InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__);
@@ -103,69 +105,68 @@ VPXDecoder::Init()
return VPXDecoder::InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__);
}
}
return VPXDecoder::InitPromise::CreateAndResolve(TrackInfo::kVideoTrack,
__func__);
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
VPXDecoder::Flush()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- mIsFlushing = true;
- nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([this] () {
- // nothing to do for now.
+ return InvokeAsync(mTaskQueue, __func__, []() {
+ return FlushPromise::CreateAndResolve(true, __func__);
});
- SyncRunnable::DispatchToThread(mTaskQueue, r);
- mIsFlushing = false;
}
-MediaResult
-VPXDecoder::DoDecode(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+VPXDecoder::ProcessDecode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
#if defined(DEBUG)
vpx_codec_stream_info_t si;
PodZero(&si);
si.sz = sizeof(si);
if (mCodec == Codec::VP8) {
vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), aSample->Data(), aSample->Size(), &si);
} else if (mCodec == Codec::VP9) {
vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), aSample->Data(), aSample->Size(), &si);
}
NS_ASSERTION(bool(si.is_kf) == aSample->mKeyframe,
"VPX Decode Keyframe error sample->mKeyframe and si.si_kf out of sync");
#endif
if (vpx_codec_err_t r = vpx_codec_decode(&mVPX, aSample->Data(), aSample->Size(), nullptr, 0)) {
LOG("VPX Decode error: %s", vpx_codec_err_to_string(r));
- return MediaResult(
- NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("VPX error: %s", vpx_codec_err_to_string(r)));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("VPX error: %s", vpx_codec_err_to_string(r))),
+ __func__);
}
vpx_codec_iter_t iter = nullptr;
vpx_image_t *img;
vpx_image_t *img_alpha = nullptr;
bool alpha_decoded = false;
+ DecodedData results;
while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420 ||
img->fmt == VPX_IMG_FMT_I444,
"WebM image format not I420 or I444");
NS_ASSERTION(!alpha_decoded,
"Multiple frames per packet that contains alpha");
if (aSample->AlphaSize() > 0) {
- if(!alpha_decoded){
+ if (!alpha_decoded) {
MediaResult rv = DecodeAlpha(&img_alpha, aSample);
if (NS_FAILED(rv)) {
- return(rv);
+ return DecodePromise::CreateAndReject(rv, __func__);
}
alpha_decoded = true;
}
}
// Chroma shifts are rounded down as per the decoding examples in the SDK
VideoData::YCbCrBuffer b;
b.mPlanes[0].mData = img->planes[0];
b.mPlanes[0].mStride = img->stride[0];
@@ -190,18 +191,20 @@ VPXDecoder::DoDecode(MediaRawData* aSamp
} else if (img->fmt == VPX_IMG_FMT_I444) {
b.mPlanes[1].mHeight = img->d_h;
b.mPlanes[1].mWidth = img->d_w;
b.mPlanes[2].mHeight = img->d_h;
b.mPlanes[2].mWidth = img->d_w;
} else {
LOG("VPX Unknown image format");
- return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("VPX Unknown image format"));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("VPX Unknown image format")),
+ __func__);
}
RefPtr<VideoData> v;
if (!img_alpha) {
v = VideoData::CreateAndCopyData(mInfo,
mImageContainer,
aSample->mOffset,
aSample->mTime,
@@ -228,66 +231,45 @@ VPXDecoder::DoDecode(MediaRawData* aSamp
aSample->mKeyframe,
aSample->mTimecode,
mInfo.ScaledImageRect(img->d_w,
img->d_h));
}
if (!v) {
- LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
- img->d_w, img->d_h, mInfo.mDisplay.width, mInfo.mDisplay.height,
- mInfo.mImage.width, mInfo.mImage.height);
- return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ LOG(
+ "Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
+ img->d_w, img->d_h, mInfo.mDisplay.width, mInfo.mDisplay.height,
+ mInfo.mImage.width, mInfo.mImage.height);
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
}
- mCallback->Output(v);
+ results.AppendElement(Move(v));
}
- return NS_OK;
+ return DecodePromise::CreateAndResolve(Move(results), __func__);
}
-void
-VPXDecoder::ProcessDecode(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+VPXDecoder::Decode(MediaRawData* aSample)
{
- MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
- if (mIsFlushing) {
- return;
- }
- MediaResult rv = DoDecode(aSample);
- if (NS_FAILED(rv)) {
- mCallback->Error(rv);
- } else {
- mCallback->InputExhausted();
- }
+ return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
+ &VPXDecoder::ProcessDecode, aSample);
}
-void
-VPXDecoder::Input(MediaRawData* aSample)
-{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
- this, &VPXDecoder::ProcessDecode, aSample));
-}
-
-void
-VPXDecoder::ProcessDrain()
-{
- MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
- mCallback->DrainComplete();
-}
-
-void
+RefPtr<MediaDataDecoder::DecodePromise>
VPXDecoder::Drain()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- mTaskQueue->Dispatch(NewRunnableMethod(this, &VPXDecoder::ProcessDrain));
+ return InvokeAsync(mTaskQueue, __func__, [] {
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+ });
}
MediaResult
-VPXDecoder::DecodeAlpha(vpx_image_t** aImgAlpha,
- MediaRawData* aSample)
+VPXDecoder::DecodeAlpha(vpx_image_t** aImgAlpha, const MediaRawData* aSample)
{
vpx_codec_err_t r = vpx_codec_decode(&mVPXAlpha,
aSample->AlphaData(),
aSample->AlphaSize(),
nullptr,
0);
if (r) {
LOG("VPX decode alpha error: %s", vpx_codec_err_to_string(r));
--- a/dom/media/platforms/agnostic/VPXDecoder.h
+++ b/dom/media/platforms/agnostic/VPXDecoder.h
@@ -20,48 +20,44 @@ using namespace layers;
class VPXDecoder : public MediaDataDecoder
{
public:
explicit VPXDecoder(const CreateDecoderParams& aParams);
~VPXDecoder();
RefPtr<InitPromise> Init() override;
- void Input(MediaRawData* aSample) override;
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "libvpx video decoder";
}
- enum Codec: uint8_t {
+ enum Codec: uint8_t
+ {
VP8 = 1 << 0,
VP9 = 1 << 1
};
// Return true if aMimeType is a one of the strings used by our demuxers to
// identify VPX of the specified type. Does not parse general content type
// strings, i.e. white space matters.
static bool IsVPX(const nsACString& aMimeType, uint8_t aCodecMask=VP8|VP9);
static bool IsVP8(const nsACString& aMimeType);
static bool IsVP9(const nsACString& aMimeType);
private:
- void ProcessDecode(MediaRawData* aSample);
- MediaResult DoDecode(MediaRawData* aSample);
- void ProcessDrain();
- MediaResult DecodeAlpha(vpx_image_t** aImgAlpha,
- MediaRawData* aSample);
+ RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
+ MediaResult DecodeAlpha(vpx_image_t** aImgAlpha, const MediaRawData* aSample);
const RefPtr<ImageContainer> mImageContainer;
const RefPtr<TaskQueue> mTaskQueue;
- MediaDataDecoderCallback* mCallback;
- Atomic<bool> mIsFlushing;
// VPx decoder state
vpx_codec_ctx_t mVPX;
// VPx alpha decoder state
vpx_codec_ctx_t mVPXAlpha;
const VideoInfo& mInfo;
--- a/dom/media/platforms/agnostic/VorbisDecoder.cpp
+++ b/dom/media/platforms/agnostic/VorbisDecoder.cpp
@@ -28,20 +28,18 @@ ogg_packet InitVorbisPacket(const unsign
packet.granulepos = aGranulepos;
packet.packetno = aPacketNo;
return packet;
}
VorbisDataDecoder::VorbisDataDecoder(const CreateDecoderParams& aParams)
: mInfo(aParams.AudioConfig())
, mTaskQueue(aParams.mTaskQueue)
- , mCallback(aParams.mCallback)
, mPacketCount(0)
, mFrames(0)
- , mIsFlushing(false)
{
// Zero these member vars to avoid crashes in Vorbis clear functions when
// destructor is called before |Init|.
PodZero(&mVorbisBlock);
PodZero(&mVorbisDsp);
PodZero(&mVorbisInfo);
PodZero(&mVorbisComment);
}
@@ -49,19 +47,23 @@ VorbisDataDecoder::VorbisDataDecoder(con
VorbisDataDecoder::~VorbisDataDecoder()
{
vorbis_block_clear(&mVorbisBlock);
vorbis_dsp_clear(&mVorbisDsp);
vorbis_info_clear(&mVorbisInfo);
vorbis_comment_clear(&mVorbisComment);
}
-void
+RefPtr<ShutdownPromise>
VorbisDataDecoder::Shutdown()
{
+ RefPtr<VorbisDataDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self]() {
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+ });
}
RefPtr<MediaDataDecoder::InitPromise>
VorbisDataDecoder::Init()
{
vorbis_info_init(&mVorbisInfo);
vorbis_comment_init(&mVorbisComment);
PodZero(&mVorbisDsp);
@@ -117,44 +119,27 @@ VorbisDataDecoder::DecodeHeader(const un
MOZ_ASSERT(mPacketCount <= 3);
int r = vorbis_synthesis_headerin(&mVorbisInfo,
&mVorbisComment,
&pkt);
return r == 0 ? NS_OK : NS_ERROR_FAILURE;
}
-void
-VorbisDataDecoder::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+VorbisDataDecoder::Decode(MediaRawData* aSample)
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
- this, &VorbisDataDecoder::ProcessDecode, aSample));
+ return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
+ &VorbisDataDecoder::ProcessDecode, aSample);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
VorbisDataDecoder::ProcessDecode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
- if (mIsFlushing) {
- return;
- }
-
- MediaResult rv = DoDecode(aSample);
- if (NS_FAILED(rv)) {
- mCallback->Error(rv);
- } else {
- mCallback->InputExhausted();
- }
-}
-
-MediaResult
-VorbisDataDecoder::DoDecode(MediaRawData* aSample)
-{
- MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
const unsigned char* aData = aSample->Data();
size_t aLength = aSample->Size();
int64_t aOffset = aSample->mOffset;
int64_t aTstampUsecs = aSample->mTime;
int64_t aTotalFrames = 0;
MOZ_ASSERT(mPacketCount >= 3);
@@ -165,128 +150,130 @@ VorbisDataDecoder::DoDecode(MediaRawData
mLastFrameTime = Some(aSample->mTime);
}
ogg_packet pkt = InitVorbisPacket(aData, aLength, false, aSample->mEOS,
aSample->mTimecode, mPacketCount++);
int err = vorbis_synthesis(&mVorbisBlock, &pkt);
if (err) {
- return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("vorbis_synthesis:%d", err));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("vorbis_synthesis:%d", err)),
+ __func__);
}
err = vorbis_synthesis_blockin(&mVorbisDsp, &mVorbisBlock);
if (err) {
- return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("vorbis_synthesis_blockin:%d", err));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("vorbis_synthesis_blockin:%d", err)),
+ __func__);
}
VorbisPCMValue** pcm = 0;
int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
if (frames == 0) {
- return NS_OK;
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
+
+ DecodedData results;
while (frames > 0) {
uint32_t channels = mVorbisDsp.vi->channels;
uint32_t rate = mVorbisDsp.vi->rate;
AlignedAudioBuffer buffer(frames*channels);
if (!buffer) {
- return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
}
for (uint32_t j = 0; j < channels; ++j) {
VorbisPCMValue* channel = pcm[j];
for (uint32_t i = 0; i < uint32_t(frames); ++i) {
buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
}
}
CheckedInt64 duration = FramesToUsecs(frames, rate);
if (!duration.isValid()) {
- return MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
- RESULT_DETAIL("Overflow converting audio duration"));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Overflow converting audio duration")),
+ __func__);
}
CheckedInt64 total_duration = FramesToUsecs(mFrames, rate);
if (!total_duration.isValid()) {
- return MediaResult(
- NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
- RESULT_DETAIL("Overflow converting audio total_duration"));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Overflow converting audio total_duration")),
+ __func__);
}
CheckedInt64 time = total_duration + aTstampUsecs;
if (!time.isValid()) {
- return MediaResult(
- NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
- RESULT_DETAIL("Overflow adding total_duration and aTstampUsecs"));
+ return DecodePromise::CreateAndReject(
+ MediaResult(
+ NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Overflow adding total_duration and aTstampUsecs")),
+ __func__);
};
if (!mAudioConverter) {
AudioConfig in(AudioConfig::ChannelLayout(channels, VorbisLayout(channels)),
rate);
AudioConfig out(channels, rate);
if (!in.IsValid() || !out.IsValid()) {
- return MediaResult(
- NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Invalid channel layout:%u", channels));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Invalid channel layout:%u", channels)),
+ __func__);
}
mAudioConverter = MakeUnique<AudioConverter>(in, out);
}
MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
AudioSampleBuffer data(Move(buffer));
data = mAudioConverter->Process(Move(data));
aTotalFrames += frames;
- mCallback->Output(new AudioData(aOffset,
- time.value(),
- duration.value(),
- frames,
- data.Forget(),
- channels,
- rate));
+
+ results.AppendElement(new AudioData(aOffset, time.value(), duration.value(),
+ frames, data.Forget(), channels, rate));
mFrames += frames;
err = vorbis_synthesis_read(&mVorbisDsp, frames);
if (err) {
- return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("vorbis_synthesis_read:%d", err));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("vorbis_synthesis_read:%d", err)),
+ __func__);
}
frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
}
-
- return NS_OK;
-}
-
-void
-VorbisDataDecoder::ProcessDrain()
-{
- MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
- mCallback->DrainComplete();
+ return DecodePromise::CreateAndResolve(Move(results), __func__);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
VorbisDataDecoder::Drain()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- mTaskQueue->Dispatch(NewRunnableMethod(this, &VorbisDataDecoder::ProcessDrain));
+ return InvokeAsync(mTaskQueue, __func__, [] {
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+ });
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
VorbisDataDecoder::Flush()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- mIsFlushing = true;
- nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([this] () {
+ RefPtr<VorbisDataDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self, this]() {
// Ignore failed results from vorbis_synthesis_restart. They
// aren't fatal and it fails when ResetDecode is called at a
// time when no vorbis data has been read.
vorbis_synthesis_restart(&mVorbisDsp);
mLastFrameTime.reset();
+ return FlushPromise::CreateAndResolve(true, __func__);
});
- SyncRunnable::DispatchToThread(mTaskQueue, r);
- mIsFlushing = false;
}
/* static */
bool
VorbisDataDecoder::IsVorbis(const nsACString& aMimeType)
{
return aMimeType.EqualsLiteral("audio/vorbis");
}
--- a/dom/media/platforms/agnostic/VorbisDecoder.h
+++ b/dom/media/platforms/agnostic/VorbisDecoder.h
@@ -20,47 +20,42 @@ namespace mozilla {
class VorbisDataDecoder : public MediaDataDecoder
{
public:
explicit VorbisDataDecoder(const CreateDecoderParams& aParams);
~VorbisDataDecoder();
RefPtr<InitPromise> Init() override;
- void Input(MediaRawData* aSample) override;
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "vorbis audio decoder";
}
// Return true if mimetype is Vorbis
static bool IsVorbis(const nsACString& aMimeType);
static const AudioConfig::Channel* VorbisLayout(uint32_t aChannels);
private:
nsresult DecodeHeader(const unsigned char* aData, size_t aLength);
-
- void ProcessDecode(MediaRawData* aSample);
- MediaResult DoDecode(MediaRawData* aSample);
- void ProcessDrain();
+ RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
const AudioInfo& mInfo;
const RefPtr<TaskQueue> mTaskQueue;
- MediaDataDecoderCallback* mCallback;
// Vorbis decoder state
vorbis_info mVorbisInfo;
vorbis_comment mVorbisComment;
vorbis_dsp_state mVorbisDsp;
vorbis_block mVorbisBlock;
int64_t mPacketCount;
int64_t mFrames;
Maybe<int64_t> mLastFrameTime;
UniquePtr<AudioConverter> mAudioConverter;
- Atomic<bool> mIsFlushing;
};
} // namespace mozilla
#endif
--- a/dom/media/platforms/agnostic/WAVDecoder.cpp
+++ b/dom/media/platforms/agnostic/WAVDecoder.cpp
@@ -42,55 +42,56 @@ DecodeULawSample(uint8_t aValue)
uint8_t exponent = (aValue & 0x70) >> 4;
uint8_t mantissa = aValue & 0x0F;
int16_t sample = (33 + 2 * mantissa) * (2 << (exponent + 1)) - 33;
return sign * sample;
}
WaveDataDecoder::WaveDataDecoder(const CreateDecoderParams& aParams)
: mInfo(aParams.AudioConfig())
- , mCallback(aParams.mCallback)
+ , mTaskQueue(aParams.mTaskQueue)
{
}
-void
+RefPtr<ShutdownPromise>
WaveDataDecoder::Shutdown()
{
+ RefPtr<WaveDataDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self]() {
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+ });
}
RefPtr<MediaDataDecoder::InitPromise>
WaveDataDecoder::Init()
{
return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
}
-void
-WaveDataDecoder::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+WaveDataDecoder::Decode(MediaRawData* aSample)
{
- MediaResult rv = DoDecode(aSample);
- if (NS_FAILED(rv)) {
- mCallback->Error(rv);
- } else {
- mCallback->InputExhausted();
- }
+ return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
+ &WaveDataDecoder::ProcessDecode, aSample);
}
-MediaResult
-WaveDataDecoder::DoDecode(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+WaveDataDecoder::ProcessDecode(MediaRawData* aSample)
{
size_t aLength = aSample->Size();
ByteReader aReader(aSample->Data(), aLength);
int64_t aOffset = aSample->mOffset;
uint64_t aTstampUsecs = aSample->mTime;
int32_t frames = aLength * 8 / mInfo.mBitDepth / mInfo.mChannels;
AlignedAudioBuffer buffer(frames * mInfo.mChannels);
if (!buffer) {
- return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
}
for (int i = 0; i < frames; ++i) {
for (unsigned int j = 0; j < mInfo.mChannels; ++j) {
if (mInfo.mProfile == 6) { //ALAW Data
uint8_t v = aReader.ReadU8();
int16_t decoded = DecodeALawSample(v);
buffer[i * mInfo.mChannels + j] =
IntegerToAudioSample<AudioDataValue>(decoded);
@@ -114,36 +115,36 @@ WaveDataDecoder::DoDecode(MediaRawData*
Int24bitToAudioSample<AudioDataValue>(v);
}
}
}
}
int64_t duration = frames / mInfo.mRate;
- mCallback->Output(new AudioData(aOffset,
- aTstampUsecs,
- duration,
- frames,
- Move(buffer),
- mInfo.mChannels,
- mInfo.mRate));
-
- return NS_OK;
+ return DecodePromise::CreateAndResolve(
+ DecodedData{ new AudioData(aOffset, aTstampUsecs, duration, frames,
+ Move(buffer), mInfo.mChannels, mInfo.mRate) },
+ __func__);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
WaveDataDecoder::Drain()
{
- mCallback->DrainComplete();
+ return InvokeAsync(mTaskQueue, __func__, [] {
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+ });
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
WaveDataDecoder::Flush()
{
+ return InvokeAsync(mTaskQueue, __func__, []() {
+ return FlushPromise::CreateAndResolve(true, __func__);
+ });
}
/* static */
bool
WaveDataDecoder::IsWave(const nsACString& aMimeType)
{
// Some WebAudio uses "audio/x-wav",
// WAVdemuxer uses "audio/wave; codecs=aNum".
--- a/dom/media/platforms/agnostic/WAVDecoder.h
+++ b/dom/media/platforms/agnostic/WAVDecoder.h
@@ -16,26 +16,25 @@ class WaveDataDecoder : public MediaData
{
public:
explicit WaveDataDecoder(const CreateDecoderParams& aParams);
// Return true if mimetype is Wave
static bool IsWave(const nsACString& aMimeType);
RefPtr<InitPromise> Init() override;
- void Input(MediaRawData* aSample) override;
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "wave audio decoder";
}
private:
- MediaResult DoDecode(MediaRawData* aSample);
-
+ RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
const AudioInfo& mInfo;
- MediaDataDecoderCallback* mCallback;
+ const RefPtr<TaskQueue> mTaskQueue;
};
} // namespace mozilla
#endif
--- a/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
@@ -19,63 +19,77 @@
#include "MediaPrefs.h"
#include "mozilla/EMEUtils.h"
namespace mozilla {
typedef MozPromiseRequestHolder<CDMProxy::DecryptPromise> DecryptPromiseRequestHolder;
extern already_AddRefed<PlatformDecoderModule> CreateBlankDecoderModule();
-class EMEDecryptor : public MediaDataDecoder {
-
+class EMEDecryptor : public MediaDataDecoder
+{
public:
-
EMEDecryptor(MediaDataDecoder* aDecoder,
- MediaDataDecoderCallback* aCallback,
CDMProxy* aProxy,
TaskQueue* aDecodeTaskQueue)
: mDecoder(aDecoder)
- , mCallback(aCallback)
, mTaskQueue(aDecodeTaskQueue)
, mProxy(aProxy)
- , mSamplesWaitingForKey(new SamplesWaitingForKey(this, this->mCallback,
- mTaskQueue, mProxy))
+ , mSamplesWaitingForKey(new SamplesWaitingForKey(mProxy))
, mIsShutdown(false)
{
}
- RefPtr<InitPromise> Init() override {
+ RefPtr<InitPromise> Init() override
+ {
MOZ_ASSERT(!mIsShutdown);
return mDecoder->Init();
}
- void Input(MediaRawData* aSample) override {
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override
+ {
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ MOZ_RELEASE_ASSERT(mDecrypts.Count() == 0,
+ "Can only process one sample at a time");
+ RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
+ AttemptDecode(aSample);
+ return p;
+ }
+
+ void AttemptDecode(MediaRawData* aSample)
+ {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
if (mIsShutdown) {
NS_WARNING("EME encrypted sample arrived after shutdown");
- return;
- }
- if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
+ mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
return;
}
- nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
- mProxy->GetSessionIdsForKeyId(aSample->mCrypto.mKeyId,
- writer->mCrypto.mSessionIds);
+ RefPtr<EMEDecryptor> self = this;
+ mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)
+ ->Then(mTaskQueue, __func__,
+ [self, this](MediaRawData* aSample) {
+ mKeyRequest.Complete();
+ nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
+ mProxy->GetSessionIdsForKeyId(aSample->mCrypto.mKeyId,
+ writer->mCrypto.mSessionIds);
- mDecrypts.Put(aSample, new DecryptPromiseRequestHolder());
- mProxy->Decrypt(aSample)->Then(
- mTaskQueue, __func__, this,
- &EMEDecryptor::Decrypted,
- &EMEDecryptor::Decrypted)
- ->Track(*mDecrypts.Get(aSample));
- return;
+ mDecrypts.Put(aSample, new DecryptPromiseRequestHolder());
+ mProxy->Decrypt(aSample)
+ ->Then(mTaskQueue, __func__, this,
+ &EMEDecryptor::Decrypted,
+ &EMEDecryptor::Decrypted)
+ ->Track(*mDecrypts.Get(aSample));
+ },
+ [self, this]() { mKeyRequest.Complete(); })
+ ->Track(mKeyRequest);
}
- void Decrypted(const DecryptResult& aDecrypted) {
+ void Decrypted(const DecryptResult& aDecrypted)
+ {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(aDecrypted.mSample);
nsAutoPtr<DecryptPromiseRequestHolder> holder;
mDecrypts.RemoveAndForget(aDecrypted.mSample, holder);
if (holder) {
holder->Complete();
} else {
@@ -86,185 +100,242 @@ public:
if (mIsShutdown) {
NS_WARNING("EME decrypted sample arrived after shutdown");
return;
}
if (aDecrypted.mStatus == NoKeyErr) {
// Key became unusable after we sent the sample to CDM to decrypt.
- // Call Input() again, so that the sample is enqueued for decryption
+ // Call AttemptDecode() again, so that the sample is enqueued for decryption
// if the key becomes usable again.
- Input(aDecrypted.mSample);
+ AttemptDecode(aDecrypted.mSample);
} else if (aDecrypted.mStatus != Ok) {
- if (mCallback) {
- mCallback->Error(MediaResult(
+ mDecodePromise.RejectIfExists(
+ MediaResult(
NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("decrypted.mStatus=%u", uint32_t(aDecrypted.mStatus))));
- }
+ RESULT_DETAIL("decrypted.mStatus=%u", uint32_t(aDecrypted.mStatus))),
+ __func__);
} else {
MOZ_ASSERT(!mIsShutdown);
// The sample is no longer encrypted, so clear its crypto metadata.
UniquePtr<MediaRawDataWriter> writer(aDecrypted.mSample->CreateWriter());
writer->mCrypto = CryptoSample();
- mDecoder->Input(aDecrypted.mSample);
+ RefPtr<EMEDecryptor> self = this;
+ mDecoder->Decode(aDecrypted.mSample)
+ ->Then(mTaskQueue, __func__,
+ [self, this](const DecodedData& aResults) {
+ mDecodeRequest.Complete();
+ mDecodePromise.ResolveIfExists(aResults, __func__);
+ },
+ [self, this](const MediaResult& aError) {
+ mDecodeRequest.Complete();
+ mDecodePromise.RejectIfExists(aError, __func__);
+ })
+ ->Track(mDecodeRequest);
}
}
- void Flush() override {
+ RefPtr<FlushPromise> Flush() override
+ {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(!mIsShutdown);
+ mKeyRequest.DisconnectIfExists();
+ mDecodeRequest.DisconnectIfExists();
+ mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
nsAutoPtr<DecryptPromiseRequestHolder>& holder = iter.Data();
holder->DisconnectIfExists();
iter.Remove();
}
- mDecoder->Flush();
- mSamplesWaitingForKey->Flush();
+ RefPtr<EMEDecryptor> self = this;
+ return mDecoder->Flush()->Then(mTaskQueue, __func__,
+ [self, this]() {
+ mSamplesWaitingForKey->Flush();
+ },
+ [self, this]() {
+ mSamplesWaitingForKey->Flush();
+ });
}
- void Drain() override {
+ RefPtr<DecodePromise> Drain() override
+ {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(!mIsShutdown);
+ MOZ_ASSERT(mDecodePromise.IsEmpty() && !mDecodeRequest.Exists(),
+ "Must wait for decoding to complete");
for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
nsAutoPtr<DecryptPromiseRequestHolder>& holder = iter.Data();
holder->DisconnectIfExists();
iter.Remove();
}
- mDecoder->Drain();
+ return mDecoder->Drain();
}
- void Shutdown() override {
+ RefPtr<ShutdownPromise> Shutdown() override
+ {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(!mIsShutdown);
mIsShutdown = true;
- mDecoder->Shutdown();
- mSamplesWaitingForKey->BreakCycles();
mSamplesWaitingForKey = nullptr;
- mDecoder = nullptr;
+ RefPtr<MediaDataDecoder> decoder = mDecoder.forget();
mProxy = nullptr;
- mCallback = nullptr;
+ return decoder->Shutdown();
}
- const char* GetDescriptionName() const override {
+ const char* GetDescriptionName() const override
+ {
return mDecoder->GetDescriptionName();
}
private:
-
RefPtr<MediaDataDecoder> mDecoder;
- MediaDataDecoderCallback* mCallback;
RefPtr<TaskQueue> mTaskQueue;
RefPtr<CDMProxy> mProxy;
nsClassHashtable<nsRefPtrHashKey<MediaRawData>, DecryptPromiseRequestHolder> mDecrypts;
RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
+ MozPromiseRequestHolder<SamplesWaitingForKey::WaitForKeyPromise> mKeyRequest;
+ MozPromiseHolder<DecodePromise> mDecodePromise;
+ MozPromiseHolder<DecodePromise> mDrainPromise;
+ MozPromiseHolder<FlushPromise> mFlushPromise;
+ MozPromiseRequestHolder<DecodePromise> mDecodeRequest;
+
bool mIsShutdown;
};
-class EMEMediaDataDecoderProxy : public MediaDataDecoderProxy {
+class EMEMediaDataDecoderProxy : public MediaDataDecoderProxy
+{
public:
EMEMediaDataDecoderProxy(already_AddRefed<AbstractThread> aProxyThread,
- MediaDataDecoderCallback* aCallback,
- CDMProxy* aProxy,
- TaskQueue* aTaskQueue)
- : MediaDataDecoderProxy(Move(aProxyThread), aCallback)
- , mSamplesWaitingForKey(new SamplesWaitingForKey(this, aCallback,
- aTaskQueue, aProxy))
+ CDMProxy* aProxy)
+ : MediaDataDecoderProxy(Move(aProxyThread))
+ , mTaskQueue(AbstractThread::GetCurrent()->AsTaskQueue())
+ , mSamplesWaitingForKey(new SamplesWaitingForKey(aProxy))
, mProxy(aProxy)
{
}
- void Input(MediaRawData* aSample) override;
- void Shutdown() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
private:
+ RefPtr<TaskQueue> mTaskQueue;
RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
+ MozPromiseRequestHolder<SamplesWaitingForKey::WaitForKeyPromise> mKeyRequest;
+ MozPromiseHolder<DecodePromise> mDecodePromise;
+ MozPromiseRequestHolder<DecodePromise> mDecodeRequest;
RefPtr<CDMProxy> mProxy;
};
-void
-EMEMediaDataDecoderProxy::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+EMEMediaDataDecoderProxy::Decode(MediaRawData* aSample)
{
- if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
- return;
- }
+ RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
+
+ RefPtr<EMEMediaDataDecoderProxy> self = this;
+ mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)
+ ->Then(mTaskQueue, __func__,
+ [self, this](MediaRawData* aSample) {
+ mKeyRequest.Complete();
- nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
- mProxy->GetSessionIdsForKeyId(aSample->mCrypto.mKeyId,
- writer->mCrypto.mSessionIds);
+ nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
+ mProxy->GetSessionIdsForKeyId(aSample->mCrypto.mKeyId,
+ writer->mCrypto.mSessionIds);
+ MediaDataDecoderProxy::Decode(aSample)
+ ->Then(mTaskQueue, __func__,
+ [self, this](const DecodedData& aResults) {
+ mDecodeRequest.Complete();
+ mDecodePromise.Resolve(aResults, __func__);
+ },
+ [self, this](const MediaResult& aError) {
+ mDecodeRequest.Complete();
+ mDecodePromise.Reject(aError, __func__);
+ })
+ ->Track(mDecodeRequest);
+ },
+ [self, this]() {
+ mKeyRequest.Complete();
+ MOZ_CRASH("Should never get here");
+ })
+ ->Track(mKeyRequest);
- MediaDataDecoderProxy::Input(aSample);
+ return p;
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
+EMEMediaDataDecoderProxy::Flush()
+{
+ mKeyRequest.DisconnectIfExists();
+ mDecodeRequest.DisconnectIfExists();
+ mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+ return MediaDataDecoderProxy::Flush();
+}
+
+RefPtr<ShutdownPromise>
EMEMediaDataDecoderProxy::Shutdown()
{
- MediaDataDecoderProxy::Shutdown();
-
- mSamplesWaitingForKey->BreakCycles();
mSamplesWaitingForKey = nullptr;
mProxy = nullptr;
+ return MediaDataDecoderProxy::Shutdown();
}
EMEDecoderModule::EMEDecoderModule(CDMProxy* aProxy, PDMFactory* aPDM)
: mProxy(aProxy)
, mPDM(aPDM)
{
}
EMEDecoderModule::~EMEDecoderModule()
{
}
static already_AddRefed<MediaDataDecoderProxy>
-CreateDecoderWrapper(MediaDataDecoderCallback* aCallback, CDMProxy* aProxy, TaskQueue* aTaskQueue)
+CreateDecoderWrapper(CDMProxy* aProxy)
{
RefPtr<gmp::GeckoMediaPluginService> s(gmp::GeckoMediaPluginService::GetGeckoMediaPluginService());
if (!s) {
return nullptr;
}
RefPtr<AbstractThread> thread(s->GetAbstractGMPThread());
if (!thread) {
return nullptr;
}
RefPtr<MediaDataDecoderProxy> decoder(
- new EMEMediaDataDecoderProxy(thread.forget(), aCallback, aProxy, aTaskQueue));
+ new EMEMediaDataDecoderProxy(thread.forget(), aProxy));
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
EMEDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
{
MOZ_ASSERT(aParams.mConfig.mCrypto.mValid);
if (MediaPrefs::EMEBlankVideo()) {
EME_LOG("EMEDecoderModule::CreateVideoDecoder() creating a blank decoder.");
RefPtr<PlatformDecoderModule> m(CreateBlankDecoderModule());
return m->CreateVideoDecoder(aParams);
}
if (SupportsMimeType(aParams.mConfig.mMimeType, nullptr)) {
// GMP decodes. Assume that means it can decrypt too.
- RefPtr<MediaDataDecoderProxy> wrapper =
- CreateDecoderWrapper(aParams.mCallback, mProxy, aParams.mTaskQueue);
- auto params = GMPVideoDecoderParams(aParams).WithCallback(wrapper);
+ RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(mProxy);
+ auto params = GMPVideoDecoderParams(aParams);
wrapper->SetProxyTarget(new EMEVideoDecoder(mProxy, params));
return wrapper.forget();
}
MOZ_ASSERT(mPDM);
RefPtr<MediaDataDecoder> decoder(mPDM->CreateDecoder(aParams));
if (!decoder) {
return nullptr;
}
- RefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(decoder,
- aParams.mCallback,
- mProxy,
- AbstractThread::GetCurrent()->AsTaskQueue()));
+ RefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(
+ decoder, mProxy, AbstractThread::GetCurrent()->AsTaskQueue()));
return emeDecoder.forget();
}
already_AddRefed<MediaDataDecoder>
EMEDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
{
MOZ_ASSERT(aParams.mConfig.mCrypto.mValid);
@@ -278,20 +349,18 @@ EMEDecoderModule::CreateAudioDecoder(con
return m->CreateAudioDecoder(aParams);
}
RefPtr<MediaDataDecoder> decoder(mPDM->CreateDecoder(aParams));
if (!decoder) {
return nullptr;
}
- RefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(decoder,
- aParams.mCallback,
- mProxy,
- AbstractThread::GetCurrent()->AsTaskQueue()));
+ RefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(
+ decoder, mProxy, AbstractThread::GetCurrent()->AsTaskQueue()));
return emeDecoder.forget();
}
PlatformDecoderModule::ConversionRequired
EMEDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
{
if (aConfig.IsVideo() && MP4Decoder::IsH264(aConfig.mMimeType)) {
return ConversionRequired::kNeedAVCC;
--- a/dom/media/platforms/agnostic/eme/EMEDecoderModule.h
+++ b/dom/media/platforms/agnostic/eme/EMEDecoderModule.h
@@ -10,19 +10,18 @@
#include "PlatformDecoderModule.h"
#include "PDMFactory.h"
#include "gmp-decryption.h"
namespace mozilla {
class CDMProxy;
-class EMEDecoderModule : public PlatformDecoderModule {
-private:
-
+class EMEDecoderModule : public PlatformDecoderModule
+{
public:
EMEDecoderModule(CDMProxy* aProxy, PDMFactory* aPDM);
virtual ~EMEDecoderModule();
protected:
// Decode thread.
already_AddRefed<MediaDataDecoder>
@@ -38,15 +37,13 @@ protected:
bool
SupportsMimeType(const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const override;
private:
RefPtr<CDMProxy> mProxy;
// Will be null if CDM has decoding capability.
RefPtr<PDMFactory> mPDM;
- // We run the PDM on its own task queue.
- RefPtr<TaskQueue> mTaskQueue;
};
} // namespace mozilla
#endif // EMEDecoderModule_h_
--- a/dom/media/platforms/agnostic/eme/EMEVideoDecoder.cpp
+++ b/dom/media/platforms/agnostic/eme/EMEVideoDecoder.cpp
@@ -1,44 +1,31 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#include "mozilla/CDMProxy.h"
#include "EMEVideoDecoder.h"
#include "GMPVideoEncodedFrameImpl.h"
-#include "mozilla/CDMProxy.h"
#include "MediaData.h"
#include "MP4Decoder.h"
+#include "PlatformDecoderModule.h"
#include "VPXDecoder.h"
namespace mozilla {
-void
-EMEVideoCallbackAdapter::Error(GMPErr aErr)
-{
- if (aErr == GMPNoKeyErr) {
- // The GMP failed to decrypt a frame due to not having a key. This can
- // happen if a key expires or a session is closed during playback.
- NS_WARNING("GMP failed to decrypt due to lack of key");
- return;
- }
- VideoCallbackAdapter::Error(aErr);
-}
-
EMEVideoDecoder::EMEVideoDecoder(CDMProxy* aProxy,
const GMPVideoDecoderParams& aParams)
- : GMPVideoDecoder(GMPVideoDecoderParams(aParams).WithAdapter(
- new EMEVideoCallbackAdapter(aParams.mCallback,
- VideoInfo(aParams.mConfig.mDisplay),
- aParams.mImageContainer)))
+ : GMPVideoDecoder(GMPVideoDecoderParams(aParams))
, mProxy(aProxy)
, mDecryptorId(aProxy->GetDecryptorId())
-{}
+{
+}
void
EMEVideoDecoder::InitTags(nsTArray<nsCString>& aTags)
{
VideoInfo config = GetConfig();
if (MP4Decoder::IsH264(config.mMimeType)) {
aTags.AppendElement(NS_LITERAL_CSTRING("h264"));
} else if (VPXDecoder::IsVP8(config.mMimeType)) {
--- a/dom/media/platforms/agnostic/eme/EMEVideoDecoder.h
+++ b/dom/media/platforms/agnostic/eme/EMEVideoDecoder.h
@@ -3,34 +3,23 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef EMEVideoDecoder_h_
#define EMEVideoDecoder_h_
#include "GMPVideoDecoder.h"
-#include "PlatformDecoderModule.h"
namespace mozilla {
class CDMProxy;
+class MediaRawData;
class TaskQueue;
-class EMEVideoCallbackAdapter : public VideoCallbackAdapter {
-public:
- EMEVideoCallbackAdapter(MediaDataDecoderCallbackProxy* aCallback,
- VideoInfo aVideoInfo,
- layers::ImageContainer* aImageContainer)
- : VideoCallbackAdapter(aCallback, aVideoInfo, aImageContainer)
- {}
-
- void Error(GMPErr aErr) override;
-};
-
class EMEVideoDecoder : public GMPVideoDecoder {
public:
EMEVideoDecoder(CDMProxy* aProxy, const GMPVideoDecoderParams& aParams);
private:
void InitTags(nsTArray<nsCString>& aTags) override;
nsCString GetNodeId() override;
uint32_t DecryptorId() const override { return mDecryptorId; }
--- a/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.cpp
+++ b/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.cpp
@@ -1,85 +1,74 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
-#include "SamplesWaitingForKey.h"
#include "mozilla/CDMProxy.h"
#include "mozilla/CDMCaps.h"
+#include "mozilla/TaskQueue.h"
#include "MediaData.h"
+#include "SamplesWaitingForKey.h"
namespace mozilla {
-SamplesWaitingForKey::SamplesWaitingForKey(MediaDataDecoder* aDecoder,
- MediaDataDecoderCallback* aCallback,
- TaskQueue* aTaskQueue,
- CDMProxy* aProxy)
+SamplesWaitingForKey::SamplesWaitingForKey(CDMProxy* aProxy)
: mMutex("SamplesWaitingForKey")
- , mDecoder(aDecoder)
- , mDecoderCallback(aCallback)
- , mTaskQueue(aTaskQueue)
, mProxy(aProxy)
{
}
SamplesWaitingForKey::~SamplesWaitingForKey()
{
+ Flush();
}
-bool
+RefPtr<SamplesWaitingForKey::WaitForKeyPromise>
SamplesWaitingForKey::WaitIfKeyNotUsable(MediaRawData* aSample)
{
if (!aSample || !aSample->mCrypto.mValid || !mProxy) {
- return false;
+ return WaitForKeyPromise::CreateAndResolve(aSample, __func__);
}
CDMCaps::AutoLock caps(mProxy->Capabilites());
const auto& keyid = aSample->mCrypto.mKeyId;
- if (!caps.IsKeyUsable(keyid)) {
- {
- MutexAutoLock lock(mMutex);
- mSamples.AppendElement(aSample);
- }
- caps.NotifyWhenKeyIdUsable(aSample->mCrypto.mKeyId, this);
- return true;
+ if (caps.IsKeyUsable(keyid)) {
+ return WaitForKeyPromise::CreateAndResolve(aSample, __func__);
}
- return false;
+ SampleEntry entry;
+ entry.mSample = aSample;
+ RefPtr<WaitForKeyPromise> p = entry.mPromise.Ensure(__func__);
+ {
+ MutexAutoLock lock(mMutex);
+ mSamples.AppendElement(Move(entry));
+ }
+ caps.NotifyWhenKeyIdUsable(aSample->mCrypto.mKeyId, this);
+ return p;
}
void
SamplesWaitingForKey::NotifyUsable(const CencKeyId& aKeyId)
{
MutexAutoLock lock(mMutex);
size_t i = 0;
while (i < mSamples.Length()) {
- if (aKeyId == mSamples[i]->mCrypto.mKeyId) {
- RefPtr<nsIRunnable> task;
- task = NewRunnableMethod<RefPtr<MediaRawData>>(mDecoder,
- &MediaDataDecoder::Input,
- RefPtr<MediaRawData>(mSamples[i]));
+ auto& entry = mSamples[i];
+ if (aKeyId == entry.mSample->mCrypto.mKeyId) {
+ entry.mPromise.Resolve(entry.mSample, __func__);
mSamples.RemoveElementAt(i);
- mTaskQueue->Dispatch(task.forget());
} else {
i++;
}
}
}
void
SamplesWaitingForKey::Flush()
{
MutexAutoLock lock(mMutex);
- mSamples.Clear();
-}
-
-void
-SamplesWaitingForKey::BreakCycles()
-{
- MutexAutoLock lock(mMutex);
- mDecoder = nullptr;
- mTaskQueue = nullptr;
- mProxy = nullptr;
+ for (auto& sample : mSamples) {
+ sample.mPromise.Reject(true, __func__);
+ }
mSamples.Clear();
}
} // namespace mozilla
--- a/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.h
+++ b/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.h
@@ -2,57 +2,56 @@
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef SamplesWaitingForKey_h_
#define SamplesWaitingForKey_h_
-#include "mozilla/TaskQueue.h"
-
-#include "PlatformDecoderModule.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/RefPtr.h"
namespace mozilla {
typedef nsTArray<uint8_t> CencKeyId;
class CDMProxy;
+class MediaRawData;
// Encapsulates the task of waiting for the CDMProxy to have the necessary
// keys to decrypt a given sample.
-class SamplesWaitingForKey {
+class SamplesWaitingForKey
+{
public:
-
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SamplesWaitingForKey)
- explicit SamplesWaitingForKey(MediaDataDecoder* aDecoder,
- MediaDataDecoderCallback* aCallback,
- TaskQueue* aTaskQueue,
- CDMProxy* aProxy);
+ typedef MozPromise<RefPtr<MediaRawData>, bool, /* IsExclusive = */ true>
+ WaitForKeyPromise;
- // Returns true if we need to wait for a key to become usable.
- // Will callback MediaDataDecoder::Input(aSample) on mDecoder once the
- // sample is ready to be decrypted. The order of input samples is
- // preserved.
- bool WaitIfKeyNotUsable(MediaRawData* aSample);
+ explicit SamplesWaitingForKey(CDMProxy* aProxy);
+
+ // Returns a promise that will be resolved if or when a key for decoding the
+ // sample becomes usable.
+ RefPtr<WaitForKeyPromise> WaitIfKeyNotUsable(MediaRawData* aSample);
void NotifyUsable(const CencKeyId& aKeyId);
void Flush();
- void BreakCycles();
-
protected:
~SamplesWaitingForKey();
private:
Mutex mMutex;
- RefPtr<MediaDataDecoder> mDecoder;
- MediaDataDecoderCallback* mDecoderCallback;
- RefPtr<TaskQueue> mTaskQueue;
RefPtr<CDMProxy> mProxy;
- nsTArray<RefPtr<MediaRawData>> mSamples;
+ struct SampleEntry
+ {
+ RefPtr<MediaRawData> mSample;
+ MozPromiseHolder<WaitForKeyPromise> mPromise;
+ };
+ nsTArray<SampleEntry> mSamples;
};
} // namespace mozilla
#endif // SamplesWaitingForKey_h_
--- a/dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp
@@ -27,41 +27,41 @@ GMPDecoderModule::GMPDecoderModule()
{
}
GMPDecoderModule::~GMPDecoderModule()
{
}
static already_AddRefed<MediaDataDecoderProxy>
-CreateDecoderWrapper(MediaDataDecoderCallback* aCallback)
+CreateDecoderWrapper()
{
RefPtr<gmp::GeckoMediaPluginService> s(gmp::GeckoMediaPluginService::GetGeckoMediaPluginService());
if (!s) {
return nullptr;
}
RefPtr<AbstractThread> thread(s->GetAbstractGMPThread());
if (!thread) {
return nullptr;
}
- RefPtr<MediaDataDecoderProxy> decoder(new MediaDataDecoderProxy(thread.forget(), aCallback));
+ RefPtr<MediaDataDecoderProxy> decoder(new MediaDataDecoderProxy(thread.forget()));
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
GMPDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
{
if (!MP4Decoder::IsH264(aParams.mConfig.mMimeType) &&
!VPXDecoder::IsVP8(aParams.mConfig.mMimeType) &&
!VPXDecoder::IsVP9(aParams.mConfig.mMimeType)) {
return nullptr;
}
- RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aParams.mCallback);
- auto params = GMPVideoDecoderParams(aParams).WithCallback(wrapper);
+ RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper();
+ auto params = GMPVideoDecoderParams(aParams);
wrapper->SetProxyTarget(new GMPVideoDecoder(params));
return wrapper.forget();
}
already_AddRefed<MediaDataDecoder>
GMPDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
{
return nullptr;
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
@@ -22,18 +22,27 @@ static bool IsOnGMPThread()
nsCOMPtr<nsIThread> gmpThread;
nsresult rv = mps->GetThread(getter_AddRefs(gmpThread));
MOZ_ASSERT(NS_SUCCEEDED(rv) && gmpThread);
return NS_GetCurrentThread() == gmpThread;
}
#endif
+GMPVideoDecoderParams::GMPVideoDecoderParams(const CreateDecoderParams& aParams)
+ : mConfig(aParams.VideoConfig())
+ , mTaskQueue(aParams.mTaskQueue)
+ , mImageContainer(aParams.mImageContainer)
+ , mLayersBackend(aParams.GetLayersBackend())
+ , mCrashHelper(aParams.mCrashHelper)
+{
+}
+
void
-VideoCallbackAdapter::Decoded(GMPVideoi420Frame* aDecodedFrame)
+GMPVideoDecoder::Decoded(GMPVideoi420Frame* aDecodedFrame)
{
GMPUniquePtr<GMPVideoi420Frame> decodedFrame(aDecodedFrame);
MOZ_ASSERT(IsOnGMPThread());
VideoData::YCbCrBuffer b;
for (int i = 0; i < kGMPNumOfPlanes; ++i) {
b.mPlanes[i].mData = decodedFrame->Buffer(GMPPlaneType(i));
@@ -46,129 +55,99 @@ VideoCallbackAdapter::Decoded(GMPVideoi4
b.mPlanes[i].mHeight = (decodedFrame->Height() + 1) / 2;
}
b.mPlanes[i].mOffset = 0;
b.mPlanes[i].mSkip = 0;
}
gfx::IntRect pictureRegion(0, 0, decodedFrame->Width(), decodedFrame->Height());
RefPtr<VideoData> v =
- VideoData::CreateAndCopyData(mVideoInfo,
+ VideoData::CreateAndCopyData(mConfig,
mImageContainer,
mLastStreamOffset,
decodedFrame->Timestamp(),
decodedFrame->Duration(),
b,
false,
-1,
pictureRegion);
+ RefPtr<GMPVideoDecoder> self = this;
if (v) {
- mCallback->Output(v);
+ mDecodedData.AppendElement(Move(v));
} else {
- mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
+ mDecodedData.Clear();
+ mDecodePromise.RejectIfExists(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("CallBack::CreateAndCopyData")),
+ __func__);
}
}
void
-VideoCallbackAdapter::ReceivedDecodedReferenceFrame(const uint64_t aPictureId)
+GMPVideoDecoder::ReceivedDecodedReferenceFrame(const uint64_t aPictureId)
{
MOZ_ASSERT(IsOnGMPThread());
}
void
-VideoCallbackAdapter::ReceivedDecodedFrame(const uint64_t aPictureId)
+GMPVideoDecoder::ReceivedDecodedFrame(const uint64_t aPictureId)
{
MOZ_ASSERT(IsOnGMPThread());
}
void
-VideoCallbackAdapter::InputDataExhausted()
+GMPVideoDecoder::InputDataExhausted()
{
MOZ_ASSERT(IsOnGMPThread());
- mCallback->InputExhausted();
-}
-
-void
-VideoCallbackAdapter::DrainComplete()
-{
- MOZ_ASSERT(IsOnGMPThread());
- mCallback->DrainComplete();
+ mDecodePromise.ResolveIfExists(mDecodedData, __func__);
+ mDecodedData.Clear();
}
void
-VideoCallbackAdapter::ResetComplete()
+GMPVideoDecoder::DrainComplete()
{
MOZ_ASSERT(IsOnGMPThread());
- mCallback->FlushComplete();
-}
-
-void
-VideoCallbackAdapter::Error(GMPErr aErr)
-{
- MOZ_ASSERT(IsOnGMPThread());
- mCallback->Error(MediaResult(aErr == GMPDecodeErr
- ? NS_ERROR_DOM_MEDIA_DECODE_ERR
- : NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("GMPErr:%x", aErr)));
+ mDrainPromise.ResolveIfExists(mDecodedData, __func__);
+ mDecodedData.Clear();
}
void
-VideoCallbackAdapter::Terminated()
+GMPVideoDecoder::ResetComplete()
{
- // Note that this *may* be called from the proxy thread also.
- mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Video GMP decoder terminated.")));
+ MOZ_ASSERT(IsOnGMPThread());
+ mFlushPromise.ResolveIfExists(true, __func__);
}
-GMPVideoDecoderParams::GMPVideoDecoderParams(const CreateDecoderParams& aParams)
- : mConfig(aParams.VideoConfig())
- , mTaskQueue(aParams.mTaskQueue)
- , mCallback(nullptr)
- , mAdapter(nullptr)
- , mImageContainer(aParams.mImageContainer)
- , mLayersBackend(aParams.GetLayersBackend())
- , mCrashHelper(aParams.mCrashHelper)
-{}
-
-GMPVideoDecoderParams&
-GMPVideoDecoderParams::WithCallback(MediaDataDecoderProxy* aWrapper)
+void
+GMPVideoDecoder::Error(GMPErr aErr)
{
- MOZ_ASSERT(aWrapper);
- MOZ_ASSERT(!mCallback); // Should only be called once per instance.
- mCallback = aWrapper->Callback();
- mAdapter = nullptr;
- return *this;
+ MOZ_ASSERT(IsOnGMPThread());
+ auto error = MediaResult(aErr == GMPDecodeErr ? NS_ERROR_DOM_MEDIA_DECODE_ERR
+ : NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("GMPErr:%x", aErr));
+ mDecodePromise.RejectIfExists(error, __func__);
+ mDrainPromise.RejectIfExists(error, __func__);
+ mFlushPromise.RejectIfExists(error, __func__);
}
-GMPVideoDecoderParams&
-GMPVideoDecoderParams::WithAdapter(VideoCallbackAdapter* aAdapter)
+void
+GMPVideoDecoder::Terminated()
{
- MOZ_ASSERT(aAdapter);
- MOZ_ASSERT(!mAdapter); // Should only be called once per instance.
- mCallback = aAdapter->Callback();
- mAdapter = aAdapter;
- return *this;
+ MOZ_ASSERT(IsOnGMPThread());
+ Error(GMPErr::GMPAbortedErr);
}
GMPVideoDecoder::GMPVideoDecoder(const GMPVideoDecoderParams& aParams)
: mConfig(aParams.mConfig)
- , mCallback(aParams.mCallback)
, mGMP(nullptr)
, mHost(nullptr)
- , mAdapter(aParams.mAdapter)
, mConvertNALUnitLengths(false)
, mCrashHelper(aParams.mCrashHelper)
+ , mImageContainer(aParams.mImageContainer)
{
- MOZ_ASSERT(!mAdapter || mCallback == mAdapter->Callback());
- if (!mAdapter) {
- mAdapter = new VideoCallbackAdapter(mCallback,
- VideoInfo(mConfig.mDisplay.width,
- mConfig.mDisplay.height),
- aParams.mImageContainer);
- }
}
void
GMPVideoDecoder::InitTags(nsTArray<nsCString>& aTags)
{
if (MP4Decoder::IsH264(mConfig.mMimeType)) {
aTags.AppendElement(NS_LITERAL_CSTRING("h264"));
} else if (VPXDecoder::IsVP8(mConfig.mMimeType)) {
@@ -185,26 +164,22 @@ GMPVideoDecoder::GetNodeId()
}
GMPUniquePtr<GMPVideoEncodedFrame>
GMPVideoDecoder::CreateFrame(MediaRawData* aSample)
{
GMPVideoFrame* ftmp = nullptr;
GMPErr err = mHost->CreateFrame(kGMPEncodedVideoFrame, &ftmp);
if (GMP_FAILED(err)) {
- mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("Host::CreateFrame:%x", err)));
return nullptr;
}
GMPUniquePtr<GMPVideoEncodedFrame> frame(static_cast<GMPVideoEncodedFrame*>(ftmp));
err = frame->CreateEmptyFrame(aSample->Size());
if (GMP_FAILED(err)) {
- mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("GMPVideoEncodedFrame::CreateEmptyFrame:%x", err)));
return nullptr;
}
memcpy(frame->Buffer(), aSample->Data(), frame->Size());
// Convert 4-byte NAL unit lengths to host-endian 4-byte buffer lengths to
// suit the GMP API.
if (mConvertNALUnitLengths) {
@@ -273,17 +248,17 @@ GMPVideoDecoder::GMPInitDone(GMPVideoDec
mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
return;
}
codec.mWidth = mConfig.mImage.width;
codec.mHeight = mConfig.mImage.height;
nsresult rv = aGMP->InitDecode(codec,
codecSpecific,
- mAdapter,
+ this,
PR_GetNumberOfProcessors());
if (NS_FAILED(rv)) {
aGMP->Close();
mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
return;
}
mGMP = aGMP;
@@ -321,71 +296,90 @@ GMPVideoDecoder::Init()
Move(callback),
DecryptorId()))) {
mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
return promise;
}
-void
-GMPVideoDecoder::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+GMPVideoDecoder::Decode(MediaRawData* aSample)
{
MOZ_ASSERT(IsOnGMPThread());
RefPtr<MediaRawData> sample(aSample);
if (!mGMP) {
- mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("mGMP not initialized")));
- return;
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("mGMP not initialized")),
+ __func__);
}
- mAdapter->SetLastStreamOffset(sample->mOffset);
+ mLastStreamOffset = sample->mOffset;
GMPUniquePtr<GMPVideoEncodedFrame> frame = CreateFrame(sample);
if (!frame) {
- mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("CreateFrame returned null")));
- return;
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("CreateFrame returned null")),
+ __func__);
}
+ RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
nsTArray<uint8_t> info; // No codec specific per-frame info to pass.
nsresult rv = mGMP->Decode(Move(frame), false, info, 0);
if (NS_FAILED(rv)) {
- mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("mGMP->Decode:%x", rv)));
+ mDecodePromise.Reject(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("mGMP->Decode:%x", rv)),
+ __func__);
}
+ return p;
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
GMPVideoDecoder::Flush()
{
MOZ_ASSERT(IsOnGMPThread());
+ mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+ mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+
+ RefPtr<FlushPromise> p = mFlushPromise.Ensure(__func__);
if (!mGMP || NS_FAILED(mGMP->Reset())) {
// Abort the flush.
- mCallback->FlushComplete();
+ mFlushPromise.Resolve(true, __func__);
}
+ return p;
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
GMPVideoDecoder::Drain()
{
MOZ_ASSERT(IsOnGMPThread());
+ MOZ_ASSERT(mDecodePromise.IsEmpty(), "Must wait for decoding to complete");
+
+ RefPtr<DecodePromise> p = mDrainPromise.Ensure(__func__);
if (!mGMP || NS_FAILED(mGMP->Drain())) {
- mCallback->DrainComplete();
+ mDrainPromise.Resolve(DecodedData(), __func__);
}
+
+ return p;
}
-void
+RefPtr<ShutdownPromise>
GMPVideoDecoder::Shutdown()
{
mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+ mFlushPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+
// Note that this *may* be called from the proxy thread also.
+ // TODO: If that's the case, then this code is racy.
if (!mGMP) {
- return;
+ return ShutdownPromise::CreateAndResolve(true, __func__);
}
// Note this unblocks flush and drain operations waiting for callbacks.
mGMP->Close();
mGMP = nullptr;
+ return ShutdownPromise::CreateAndResolve(true, __func__);
}
} // namespace mozilla
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h
@@ -11,79 +11,54 @@
#include "ImageContainer.h"
#include "MediaDataDecoderProxy.h"
#include "PlatformDecoderModule.h"
#include "mozIGeckoMediaPluginService.h"
#include "MediaInfo.h"
namespace mozilla {
-class VideoCallbackAdapter : public GMPVideoDecoderCallbackProxy {
+struct GMPVideoDecoderParams
+{
+ explicit GMPVideoDecoderParams(const CreateDecoderParams& aParams);
+
+ const VideoInfo& mConfig;
+ TaskQueue* mTaskQueue;
+ layers::ImageContainer* mImageContainer;
+ layers::LayersBackend mLayersBackend;
+ RefPtr<GMPCrashHelper> mCrashHelper;
+};
+
+class GMPVideoDecoder : public MediaDataDecoder,
+ public GMPVideoDecoderCallbackProxy
+{
public:
- VideoCallbackAdapter(MediaDataDecoderCallbackProxy* aCallback,
- VideoInfo aVideoInfo,
- layers::ImageContainer* aImageContainer)
- : mCallback(aCallback)
- , mLastStreamOffset(0)
- , mVideoInfo(aVideoInfo)
- , mImageContainer(aImageContainer)
- {}
+ explicit GMPVideoDecoder(const GMPVideoDecoderParams& aParams);
- MediaDataDecoderCallbackProxy* Callback() const { return mCallback; }
+ RefPtr<InitPromise> Init() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
+ const char* GetDescriptionName() const override
+ {
+ return "GMP video decoder";
+ }
// GMPVideoDecoderCallbackProxy
+ // All these methods are called on the GMP thread.
void Decoded(GMPVideoi420Frame* aDecodedFrame) override;
void ReceivedDecodedReferenceFrame(const uint64_t aPictureId) override;
void ReceivedDecodedFrame(const uint64_t aPictureId) override;
void InputDataExhausted() override;
void DrainComplete() override;
void ResetComplete() override;
void Error(GMPErr aErr) override;
void Terminated() override;
- void SetLastStreamOffset(int64_t aStreamOffset) {
- mLastStreamOffset = aStreamOffset;
- }
-
-private:
- MediaDataDecoderCallbackProxy* mCallback;
- int64_t mLastStreamOffset;
-
- VideoInfo mVideoInfo;
- RefPtr<layers::ImageContainer> mImageContainer;
-};
-
-struct GMPVideoDecoderParams {
- explicit GMPVideoDecoderParams(const CreateDecoderParams& aParams);
- GMPVideoDecoderParams& WithCallback(MediaDataDecoderProxy* aWrapper);
- GMPVideoDecoderParams& WithAdapter(VideoCallbackAdapter* aAdapter);
-
- const VideoInfo& mConfig;
- TaskQueue* mTaskQueue;
- MediaDataDecoderCallbackProxy* mCallback;
- VideoCallbackAdapter* mAdapter;
- layers::ImageContainer* mImageContainer;
- layers::LayersBackend mLayersBackend;
- RefPtr<GMPCrashHelper> mCrashHelper;
-};
-
-class GMPVideoDecoder : public MediaDataDecoder {
-public:
- explicit GMPVideoDecoder(const GMPVideoDecoderParams& aParams);
-
- RefPtr<InitPromise> Init() override;
- void Input(MediaRawData* aSample) override;
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
- const char* GetDescriptionName() const override
- {
- return "GMP video decoder";
- }
-
protected:
virtual void InitTags(nsTArray<nsCString>& aTags);
virtual nsCString GetNodeId();
virtual uint32_t DecryptorId() const { return 0; }
virtual GMPUniquePtr<GMPVideoEncodedFrame> CreateFrame(MediaRawData* aSample);
virtual const VideoInfo& GetConfig() const;
private:
@@ -102,21 +77,27 @@ private:
}
private:
RefPtr<GMPVideoDecoder> mDecoder;
};
void GMPInitDone(GMPVideoDecoderProxy* aGMP, GMPVideoHost* aHost);
const VideoInfo mConfig;
- MediaDataDecoderCallbackProxy* mCallback;
nsCOMPtr<mozIGeckoMediaPluginService> mMPS;
GMPVideoDecoderProxy* mGMP;
GMPVideoHost* mHost;
- nsAutoPtr<VideoCallbackAdapter> mAdapter;
bool mConvertNALUnitLengths;
MozPromiseHolder<InitPromise> mInitPromise;
RefPtr<GMPCrashHelper> mCrashHelper;
+
+ int64_t mLastStreamOffset = 0;
+ RefPtr<layers::ImageContainer> mImageContainer;
+
+ MozPromiseHolder<DecodePromise> mDecodePromise;
+ MozPromiseHolder<DecodePromise> mDrainPromise;
+ MozPromiseHolder<FlushPromise> mFlushPromise;
+ DecodedData mDecodedData;
};
} // namespace mozilla
#endif // GMPVideoDecoder_h_
--- a/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.cpp
+++ b/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.cpp
@@ -1,90 +1,73 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MediaDataDecoderProxy.h"
#include "MediaData.h"
+#include "mozilla/SyncRunnable.h"
namespace mozilla {
-void
-MediaDataDecoderCallbackProxy::Error(const MediaResult& aError)
-{
- mProxyCallback->Error(aError);
-}
-
-void
-MediaDataDecoderCallbackProxy::FlushComplete()
-{
- mProxyDecoder->FlushComplete();
-}
-
-RefPtr<MediaDataDecoder::InitPromise>
-MediaDataDecoderProxy::InternalInit()
-{
- return mProxyDecoder->Init();
-}
-
RefPtr<MediaDataDecoder::InitPromise>
MediaDataDecoderProxy::Init()
{
MOZ_ASSERT(!mIsShutdown);
- return InvokeAsync(mProxyThread, this, __func__,
- &MediaDataDecoderProxy::InternalInit);
+ RefPtr<MediaDataDecoderProxy> self = this;
+ return InvokeAsync(mProxyThread, __func__,
+ [self, this]() { return mProxyDecoder->Init(); });
}
-void
-MediaDataDecoderProxy::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+MediaDataDecoderProxy::Decode(MediaRawData* aSample)
{
MOZ_ASSERT(!IsOnProxyThread());
MOZ_ASSERT(!mIsShutdown);
- nsCOMPtr<nsIRunnable> task(new InputTask(mProxyDecoder, aSample));
- mProxyThread->Dispatch(task.forget());
+ RefPtr<MediaDataDecoderProxy> self = this;
+ RefPtr<MediaRawData> sample = aSample;
+ return InvokeAsync(mProxyThread, __func__, [self, this, sample]() {
+ return mProxyDecoder->Decode(sample);
+ });
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
MediaDataDecoderProxy::Flush()
{
MOZ_ASSERT(!IsOnProxyThread());
MOZ_ASSERT(!mIsShutdown);
- mFlushComplete.Set(false);
-
- mProxyThread->Dispatch(NewRunnableMethod(mProxyDecoder, &MediaDataDecoder::Flush));
-
- mFlushComplete.WaitUntil(true);
+ RefPtr<MediaDataDecoderProxy> self = this;
+ return InvokeAsync(mProxyThread, __func__,
+ [self, this]() { return mProxyDecoder->Flush(); });
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
MediaDataDecoderProxy::Drain()
{
MOZ_ASSERT(!IsOnProxyThread());
MOZ_ASSERT(!mIsShutdown);
- mProxyThread->Dispatch(NewRunnableMethod(mProxyDecoder, &MediaDataDecoder::Drain));
+ RefPtr<MediaDataDecoderProxy> self = this;
+ return InvokeAsync(mProxyThread, __func__,
+ [self, this]() { return mProxyDecoder->Drain(); });
}
-void
+RefPtr<ShutdownPromise>
MediaDataDecoderProxy::Shutdown()
{
+ MOZ_ASSERT(!IsOnProxyThread());
// Note that this *may* be called from the proxy thread also.
MOZ_ASSERT(!mIsShutdown);
#if defined(DEBUG)
mIsShutdown = true;
#endif
- mProxyThread->AsEventTarget()->Dispatch(NewRunnableMethod(mProxyDecoder,
- &MediaDataDecoder::Shutdown),
- NS_DISPATCH_SYNC);
-}
-void
-MediaDataDecoderProxy::FlushComplete()
-{
- mFlushComplete.Set(true);
+ RefPtr<MediaDataDecoderProxy> self = this;
+ return InvokeAsync(mProxyThread, __func__,
+ [self, this]() { return mProxyDecoder->Shutdown(); });
}
} // namespace mozilla
--- a/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h
+++ b/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h
@@ -3,174 +3,67 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined(MediaDataDecoderProxy_h_)
#define MediaDataDecoderProxy_h_
#include "PlatformDecoderModule.h"
+#include "mozilla/Atomics.h"
#include "mozilla/RefPtr.h"
#include "nsThreadUtils.h"
#include "nscore.h"
#include "GMPService.h"
namespace mozilla {
-class InputTask : public Runnable {
-public:
- InputTask(MediaDataDecoder* aDecoder,
- MediaRawData* aSample)
- : mDecoder(aDecoder)
- , mSample(aSample)
- {}
-
- NS_IMETHOD Run() override {
- mDecoder->Input(mSample);
- return NS_OK;
- }
-
-private:
- RefPtr<MediaDataDecoder> mDecoder;
- RefPtr<MediaRawData> mSample;
-};
-
-template<typename T>
-class Condition {
+class MediaDataDecoderProxy : public MediaDataDecoder
+{
public:
- explicit Condition(T aValue)
- : mMonitor("Condition")
- , mCondition(aValue)
- {}
-
- void Set(T aValue) {
- MonitorAutoLock mon(mMonitor);
- mCondition = aValue;
- mon.NotifyAll();
- }
-
- void WaitUntil(T aValue) {
- MonitorAutoLock mon(mMonitor);
- while (mCondition != aValue) {
- mon.Wait();
- }
- }
-
-private:
- Monitor mMonitor;
- T mCondition;
-};
-
-class MediaDataDecoderProxy;
-
-class MediaDataDecoderCallbackProxy : public MediaDataDecoderCallback {
-public:
- MediaDataDecoderCallbackProxy(MediaDataDecoderProxy* aProxyDecoder,
- MediaDataDecoderCallback* aCallback)
- : mProxyDecoder(aProxyDecoder)
- , mProxyCallback(aCallback)
- {
- }
-
- void Output(MediaData* aData) override {
- mProxyCallback->Output(aData);
- }
-
- void Error(const MediaResult& aError) override;
-
- void InputExhausted() override {
- mProxyCallback->InputExhausted();
- }
-
- void DrainComplete() override {
- mProxyCallback->DrainComplete();
- }
-
- void ReleaseMediaResources() override {
- mProxyCallback->ReleaseMediaResources();
- }
-
- void FlushComplete();
-
- bool OnReaderTaskQueue() override
- {
- return mProxyCallback->OnReaderTaskQueue();
- }
-
-private:
- MediaDataDecoderProxy* mProxyDecoder;
- MediaDataDecoderCallback* mProxyCallback;
-};
-
-class MediaDataDecoderProxy : public MediaDataDecoder {
-public:
- MediaDataDecoderProxy(already_AddRefed<AbstractThread> aProxyThread,
- MediaDataDecoderCallback* aCallback)
+ explicit MediaDataDecoderProxy(already_AddRefed<AbstractThread> aProxyThread)
: mProxyThread(aProxyThread)
- , mProxyCallback(this, aCallback)
- , mFlushComplete(false)
#if defined(DEBUG)
, mIsShutdown(false)
#endif
{
}
- // Ideally, this would return a regular MediaDataDecoderCallback pointer
- // to retain the clean abstraction, but until MediaDataDecoderCallback
- // supports the FlushComplete interface, this will have to do. When MDDC
- // supports FlushComplete, this, the GMP*Decoders, and the
- // *CallbackAdapters can be reverted to accepting a regular
- // MediaDataDecoderCallback pointer.
- MediaDataDecoderCallbackProxy* Callback()
- {
- return &mProxyCallback;
- }
-
void SetProxyTarget(MediaDataDecoder* aProxyDecoder)
{
MOZ_ASSERT(aProxyDecoder);
mProxyDecoder = aProxyDecoder;
}
// These are called from the decoder thread pool.
- // Init and Shutdown run synchronously on the proxy thread, all others are
- // asynchronously and responded to via the MediaDataDecoderCallback.
- // Note: the nsresults returned by the proxied decoder are lost.
+ // Shutdown runs synchronously on the proxy thread; all others are
+ // asynchronous.
RefPtr<InitPromise> Init() override;
- void Input(MediaRawData* aSample) override;
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "GMP proxy data decoder";
}
- // Called by MediaDataDecoderCallbackProxy.
- void FlushComplete();
-
private:
- RefPtr<InitPromise> InternalInit();
#ifdef DEBUG
- bool IsOnProxyThread() {
+ bool IsOnProxyThread()
+ {
return mProxyThread && mProxyThread->IsCurrentThreadIn();
}
#endif
- friend class InputTask;
- friend class InitTask;
-
RefPtr<MediaDataDecoder> mProxyDecoder;
RefPtr<AbstractThread> mProxyThread;
- MediaDataDecoderCallbackProxy mProxyCallback;
-
- Condition<bool> mFlushComplete;
#if defined(DEBUG)
- bool mIsShutdown;
+ Atomic<bool> mIsShutdown;
#endif
};
} // namespace mozilla
#endif // MediaDataDecoderProxy_h_
--- a/dom/media/platforms/android/AndroidDecoderModule.cpp
+++ b/dom/media/platforms/android/AndroidDecoderModule.cpp
@@ -48,17 +48,18 @@ TranslateMimeType(const nsACString& aMim
}
static bool
GetFeatureStatus(int32_t aFeature)
{
nsCOMPtr<nsIGfxInfo> gfxInfo = services::GetGfxInfo();
int32_t status = nsIGfxInfo::FEATURE_STATUS_UNKNOWN;
nsCString discardFailureId;
- if (!gfxInfo || NS_FAILED(gfxInfo->GetFeatureStatus(aFeature, discardFailureId, &status))) {
+ if (!gfxInfo || NS_FAILED(gfxInfo->GetFeatureStatus(
+ aFeature, discardFailureId, &status))) {
return false;
}
return status == nsIGfxInfo::FEATURE_STATUS_OK;
};
CryptoInfo::LocalRef
GetCryptoInfoFromSample(const MediaRawData* aSample)
{
@@ -67,18 +68,18 @@ GetCryptoInfoFromSample(const MediaRawDa
if (!cryptoObj.mValid) {
return nullptr;
}
CryptoInfo::LocalRef cryptoInfo;
nsresult rv = CryptoInfo::New(&cryptoInfo);
NS_ENSURE_SUCCESS(rv, nullptr);
- uint32_t numSubSamples =
- std::min<uint32_t>(cryptoObj.mPlainSizes.Length(), cryptoObj.mEncryptedSizes.Length());
+ uint32_t numSubSamples = std::min<uint32_t>(
+ cryptoObj.mPlainSizes.Length(), cryptoObj.mEncryptedSizes.Length());
uint32_t totalSubSamplesSize = 0;
for (auto& size : cryptoObj.mEncryptedSizes) {
totalSubSamplesSize += size;
}
// mPlainSizes is uint16_t, need to transform to uint32_t first.
nsTArray<uint32_t> plainSizes;
@@ -100,29 +101,26 @@ GetCryptoInfoFromSample(const MediaRawDa
// Padding with 0
tempIV.AppendElement(0);
}
auto numBytesOfPlainData = mozilla::jni::IntArray::New(
reinterpret_cast<int32_t*>(&plainSizes[0]),
plainSizes.Length());
- auto numBytesOfEncryptedData =
- mozilla::jni::IntArray::New(reinterpret_cast<const int32_t*>(&cryptoObj.mEncryptedSizes[0]),
- cryptoObj.mEncryptedSizes.Length());
+ auto numBytesOfEncryptedData = mozilla::jni::IntArray::New(
+ reinterpret_cast<const int32_t*>(&cryptoObj.mEncryptedSizes[0]),
+ cryptoObj.mEncryptedSizes.Length());
auto iv = mozilla::jni::ByteArray::New(reinterpret_cast<int8_t*>(&tempIV[0]),
- tempIV.Length());
- auto keyId = mozilla::jni::ByteArray::New(reinterpret_cast<const int8_t*>(&cryptoObj.mKeyId[0]),
- cryptoObj.mKeyId.Length());
- cryptoInfo->Set(numSubSamples,
- numBytesOfPlainData,
- numBytesOfEncryptedData,
- keyId,
- iv,
- MediaCodec::CRYPTO_MODE_AES_CTR);
+ tempIV.Length());
+ auto keyId = mozilla::jni::ByteArray::New(
+ reinterpret_cast<const int8_t*>(&cryptoObj.mKeyId[0]),
+ cryptoObj.mKeyId.Length());
+ cryptoInfo->Set(numSubSamples, numBytesOfPlainData, numBytesOfEncryptedData,
+ keyId, iv, MediaCodec::CRYPTO_MODE_AES_CTR);
return cryptoInfo;
}
AndroidDecoderModule::AndroidDecoderModule(CDMProxy* aProxy)
{
mProxy = static_cast<MediaDrmCDMProxy*>(aProxy);
}
@@ -163,17 +161,17 @@ AndroidDecoderModule::SupportsMimeType(c
// on content demuxed from mp4.
if (OpusDataDecoder::IsOpus(aMimeType) ||
VorbisDataDecoder::IsVorbis(aMimeType)) {
LOG("Rejecting audio of type %s", aMimeType.Data());
return false;
}
return java::HardwareCodecCapabilityUtils::FindDecoderCodecInfoForMimeType(
- nsCString(TranslateMimeType(aMimeType)));
+ nsCString(TranslateMimeType(aMimeType)));
}
already_AddRefed<MediaDataDecoder>
AndroidDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
{
// Temporary - forces use of VPXDecoder when alpha is present.
// Bug 1263836 will handle alpha scenario once implemented. It will shift
// the check for alpha to PDMFactory but not itself remove the need for a
@@ -190,31 +188,23 @@ AndroidDecoderModule::CreateVideoDecoder
config.mDisplay.height,
&format), nullptr);
nsString drmStubId;
if (mProxy) {
drmStubId = mProxy->GetMediaDrmStubId();
}
- RefPtr<MediaDataDecoder> decoder = MediaPrefs::PDMAndroidRemoteCodecEnabled() ?
- RemoteDataDecoder::CreateVideoDecoder(config,
- format,
- aParams.mCallback,
- aParams.mImageContainer,
- drmStubId,
- mProxy,
- aParams.mTaskQueue) :
- MediaCodecDataDecoder::CreateVideoDecoder(config,
- format,
- aParams.mCallback,
- aParams.mImageContainer,
- drmStubId,
- mProxy,
- aParams.mTaskQueue);
+ RefPtr<MediaDataDecoder> decoder =
+ MediaPrefs::PDMAndroidRemoteCodecEnabled()
+ ? RemoteDataDecoder::CreateVideoDecoder(
+ config, format, aParams.mImageContainer, drmStubId, mProxy,
+ aParams.mTaskQueue)
+ : MediaCodecDataDecoder::CreateVideoDecoder(
+ config, format, aParams.mImageContainer, drmStubId, mProxy);
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
AndroidDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
{
const AudioInfo& config = aParams.AudioConfig();
if (config.mBitDepth != 16) {
@@ -232,29 +222,22 @@ AndroidDecoderModule::CreateAudioDecoder
config.mRate,
config.mChannels,
&format), nullptr);
nsString drmStubId;
if (mProxy) {
drmStubId = mProxy->GetMediaDrmStubId();
}
- RefPtr<MediaDataDecoder> decoder = MediaPrefs::PDMAndroidRemoteCodecEnabled() ?
- RemoteDataDecoder::CreateAudioDecoder(config,
- format,
- aParams.mCallback,
- drmStubId,
- mProxy,
- aParams.mTaskQueue) :
- MediaCodecDataDecoder::CreateAudioDecoder(config,
- format,
- aParams.mCallback,
- drmStubId,
- mProxy,
- aParams.mTaskQueue);
+ RefPtr<MediaDataDecoder> decoder =
+ MediaPrefs::PDMAndroidRemoteCodecEnabled()
+ ? RemoteDataDecoder::CreateAudioDecoder(config, format, drmStubId, mProxy,
+ aParams.mTaskQueue)
+ : MediaCodecDataDecoder::CreateAudioDecoder(config, format, drmStubId,
+ mProxy);
return decoder.forget();
}
PlatformDecoderModule::ConversionRequired
AndroidDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
{
if (aConfig.IsVideo()) {
return ConversionRequired::kNeedAnnexB;
--- a/dom/media/platforms/android/AndroidDecoderModule.h
+++ b/dom/media/platforms/android/AndroidDecoderModule.h
@@ -5,17 +5,18 @@
#ifndef AndroidDecoderModule_h_
#define AndroidDecoderModule_h_
#include "PlatformDecoderModule.h"
#include "mozilla/MediaDrmCDMProxy.h"
namespace mozilla {
-class AndroidDecoderModule : public PlatformDecoderModule {
+class AndroidDecoderModule : public PlatformDecoderModule
+{
public:
already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const CreateDecoderParams& aParams) override;
already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const CreateDecoderParams& aParams) override;
AndroidDecoderModule(CDMProxy* aProxy = nullptr);
--- a/dom/media/platforms/android/MediaCodecDataDecoder.cpp
+++ b/dom/media/platforms/android/MediaCodecDataDecoder.cpp
@@ -29,42 +29,34 @@
using namespace mozilla;
using namespace mozilla::gl;
using namespace mozilla::java;
using namespace mozilla::java::sdk;
using media::TimeUnit;
namespace mozilla {
-#define INVOKE_CALLBACK(Func, ...) \
- if (mCallback) { \
- mCallback->Func(__VA_ARGS__); \
- } else { \
- NS_WARNING("Callback not set"); \
- }
-
static MediaCodec::LocalRef
CreateDecoder(const nsACString& aMimeType)
{
MediaCodec::LocalRef codec;
NS_ENSURE_SUCCESS(MediaCodec::CreateDecoderByType(TranslateMimeType(aMimeType),
&codec), nullptr);
return codec;
}
class VideoDataDecoder : public MediaCodecDataDecoder
{
public:
VideoDataDecoder(const VideoInfo& aConfig,
MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer,
const nsString& aDrmStubId)
: MediaCodecDataDecoder(MediaData::Type::VIDEO_DATA, aConfig.mMimeType,
- aFormat, aCallback, aDrmStubId)
+ aFormat, aDrmStubId)
, mImageContainer(aImageContainer)
, mConfig(aConfig)
{
}
const char* GetDescriptionName() const override
{
@@ -114,78 +106,42 @@ public:
presentationTimeUs,
aDuration.ToMicroseconds(),
img,
isSync,
presentationTimeUs,
gfx::IntRect(0, 0,
mConfig.mDisplay.width,
mConfig.mDisplay.height));
- INVOKE_CALLBACK(Output, v);
+ if (!v) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ MonitorAutoLock mon(mMonitor);
+ mDecodedData.AppendElement(Move(v));
return NS_OK;
}
- bool SupportDecoderRecycling() const override { return mIsCodecSupportAdaptivePlayback; }
+ bool SupportDecoderRecycling() const override
+ {
+ return mIsCodecSupportAdaptivePlayback;
+ }
protected:
layers::ImageContainer* mImageContainer;
const VideoInfo& mConfig;
RefPtr<AndroidSurfaceTexture> mSurfaceTexture;
};
-
-
-class EMEVideoDataDecoder : public VideoDataDecoder {
-public:
- EMEVideoDataDecoder(const VideoInfo& aConfig,
- MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
- layers::ImageContainer* aImageContainer,
- const nsString& aDrmStubId,
- CDMProxy* aProxy,
- TaskQueue* aTaskQueue)
- : VideoDataDecoder(aConfig, aFormat, aCallback, aImageContainer, aDrmStubId)
- , mSamplesWaitingForKey(new SamplesWaitingForKey(this, aCallback,
- aTaskQueue, aProxy))
- {
- }
-
- void Input(MediaRawData* aSample) override;
- void Shutdown() override;
-
-private:
- RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
-};
-
-void
-EMEVideoDataDecoder::Input(MediaRawData* aSample)
-{
- if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
- return;
- }
- VideoDataDecoder::Input(aSample);
-}
-
-void
-EMEVideoDataDecoder::Shutdown()
-{
- VideoDataDecoder::Shutdown();
-
- mSamplesWaitingForKey->BreakCycles();
- mSamplesWaitingForKey = nullptr;
-}
-
class AudioDataDecoder : public MediaCodecDataDecoder
{
public:
AudioDataDecoder(const AudioInfo& aConfig, MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
const nsString& aDrmStubId)
: MediaCodecDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mMimeType,
- aFormat, aCallback, aDrmStubId)
+ aFormat, aDrmStubId)
{
JNIEnv* const env = jni::GetEnvForThread();
jni::ByteBuffer::LocalRef buffer(env);
NS_ENSURE_SUCCESS_VOID(aFormat->GetByteBuffer(NS_LITERAL_STRING("csd-0"),
&buffer));
if (!buffer && aConfig.mCodecSpecificConfig->Length() >= 2) {
@@ -198,17 +154,17 @@ public:
}
const char* GetDescriptionName() const override
{
return "android audio decoder";
}
nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
- MediaFormat::Param aFormat, const TimeUnit& aDuration)
+ MediaFormat::Param aFormat, const TimeUnit& aDuration) override
{
// The output on Android is always 16-bit signed
nsresult rv;
int32_t numChannels;
NS_ENSURE_SUCCESS(rv =
aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &numChannels), rv);
AudioConfig::ChannelLayout layout(numChannels);
if (!layout.IsValid()) {
@@ -245,144 +201,96 @@ public:
NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
RefPtr<AudioData> data = new AudioData(0, presentationTimeUs,
aDuration.ToMicroseconds(),
numFrames,
Move(audio),
numChannels,
sampleRate);
- INVOKE_CALLBACK(Output, data);
+ MonitorAutoLock mon(mMonitor);
+ mDecodedData.AppendElement(Move(data));
return NS_OK;
}
};
-class EMEAudioDataDecoder : public AudioDataDecoder {
-public:
- EMEAudioDataDecoder(const AudioInfo& aConfig, MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback, const nsString& aDrmStubId,
- CDMProxy* aProxy, TaskQueue* aTaskQueue)
- : AudioDataDecoder(aConfig, aFormat, aCallback, aDrmStubId)
- , mSamplesWaitingForKey(new SamplesWaitingForKey(this, aCallback,
- aTaskQueue, aProxy))
- {
+already_AddRefed<MediaDataDecoder>
+MediaCodecDataDecoder::CreateAudioDecoder(const AudioInfo& aConfig,
+ java::sdk::MediaFormat::Param aFormat,
+ const nsString& aDrmStubId,
+ CDMProxy* aProxy)
+{
+ RefPtr<MediaDataDecoder> decoder;
+ if (!aProxy) {
+ decoder = new AudioDataDecoder(aConfig, aFormat, aDrmStubId);
+ } else {
+ // TODO in bug 1334061.
}
-
- void Input(MediaRawData* aSample) override;
- void Shutdown() override;
-
-private:
- RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
-};
-
-void
-EMEAudioDataDecoder::Input(MediaRawData* aSample)
-{
- if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
- return;
- }
- AudioDataDecoder::Input(aSample);
-}
-
-void
-EMEAudioDataDecoder::Shutdown()
-{
- AudioDataDecoder::Shutdown();
-
- mSamplesWaitingForKey->BreakCycles();
- mSamplesWaitingForKey = nullptr;
+ return decoder.forget();
}
-MediaDataDecoder*
-MediaCodecDataDecoder::CreateAudioDecoder(const AudioInfo& aConfig,
- java::sdk::MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
- const nsString& aDrmStubId,
- CDMProxy* aProxy,
- TaskQueue* aTaskQueue)
-{
- if (!aProxy) {
- return new AudioDataDecoder(aConfig, aFormat, aCallback, aDrmStubId);
- } else {
- return new EMEAudioDataDecoder(aConfig,
- aFormat,
- aCallback,
- aDrmStubId,
- aProxy,
- aTaskQueue);
- }
-}
-
-MediaDataDecoder*
+already_AddRefed<MediaDataDecoder>
MediaCodecDataDecoder::CreateVideoDecoder(const VideoInfo& aConfig,
java::sdk::MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer,
const nsString& aDrmStubId,
- CDMProxy* aProxy,
- TaskQueue* aTaskQueue)
+ CDMProxy* aProxy)
{
+ RefPtr<MediaDataDecoder> decoder;
if (!aProxy) {
- return new VideoDataDecoder(aConfig, aFormat, aCallback, aImageContainer, aDrmStubId);
+ decoder = new VideoDataDecoder(aConfig, aFormat, aImageContainer, aDrmStubId);
} else {
- return new EMEVideoDataDecoder(aConfig,
- aFormat,
- aCallback,
- aImageContainer,
- aDrmStubId,
- aProxy,
- aTaskQueue);
+ // TODO in bug 1334061.
}
+ return decoder.forget();
}
MediaCodecDataDecoder::MediaCodecDataDecoder(MediaData::Type aType,
const nsACString& aMimeType,
MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
const nsString& aDrmStubId)
: mType(aType)
, mMimeType(aMimeType)
, mFormat(aFormat)
- , mCallback(aCallback)
, mInputBuffers(nullptr)
, mOutputBuffers(nullptr)
+ , mError(false)
, mMonitor("MediaCodecDataDecoder::mMonitor")
, mState(ModuleState::kDecoding)
, mDrmStubId(aDrmStubId)
{
+ mDecodePromise.SetMonitor(&mMonitor);
+ mDrainPromise.SetMonitor(&mMonitor);
}
MediaCodecDataDecoder::~MediaCodecDataDecoder()
{
Shutdown();
}
RefPtr<MediaDataDecoder::InitPromise>
MediaCodecDataDecoder::Init()
{
nsresult rv = InitDecoder(nullptr);
TrackInfo::TrackType type =
(mType == MediaData::AUDIO_DATA ? TrackInfo::TrackType::kAudioTrack
: TrackInfo::TrackType::kVideoTrack);
- return NS_SUCCEEDED(rv) ?
- InitPromise::CreateAndResolve(type, __func__) :
- InitPromise::CreateAndReject(
- NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ return NS_SUCCEEDED(rv) ? InitPromise::CreateAndResolve(type, __func__)
+ : InitPromise::CreateAndReject(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
nsresult
MediaCodecDataDecoder::InitDecoder(Surface::Param aSurface)
{
mDecoder = CreateDecoder(mMimeType);
if (!mDecoder) {
- INVOKE_CALLBACK(Error,
- MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__));
return NS_ERROR_FAILURE;
}
// Check if the video codec supports adaptive playback or not.
if (aSurface) {
mIsCodecSupportAdaptivePlayback =
java::HardwareCodecCapabilityUtils::CheckSupportsAdaptivePlayback(mDecoder,
nsCString(TranslateMimeType(mMimeType)));
@@ -390,42 +298,65 @@ MediaCodecDataDecoder::InitDecoder(Surfa
// TODO: may need to find a way to not use hard code to decide the max w/h.
mFormat->SetInteger(MediaFormat::KEY_MAX_WIDTH, 1920);
mFormat->SetInteger(MediaFormat::KEY_MAX_HEIGHT, 1080);
}
}
MediaCrypto::LocalRef crypto = MediaDrmProxy::GetMediaCrypto(mDrmStubId);
bool hascrypto = !!crypto;
- LOG("Has(%d) MediaCrypto (%s)", hascrypto, NS_ConvertUTF16toUTF8(mDrmStubId).get());
+ LOG("Has(%d) MediaCrypto (%s)", hascrypto,
+ NS_ConvertUTF16toUTF8(mDrmStubId).get());
nsresult rv;
NS_ENSURE_SUCCESS(rv = mDecoder->Configure(mFormat, aSurface, crypto, 0), rv);
NS_ENSURE_SUCCESS(rv = mDecoder->Start(), rv);
NS_ENSURE_SUCCESS(rv = ResetInputBuffers(), rv);
NS_ENSURE_SUCCESS(rv = ResetOutputBuffers(), rv);
- nsCOMPtr<nsIRunnable> r = NewRunnableMethod(this, &MediaCodecDataDecoder::DecoderLoop);
+ nsCOMPtr<nsIRunnable> r =
+ NewRunnableMethod(this, &MediaCodecDataDecoder::DecoderLoop);
rv = NS_NewNamedThread("MC Decoder", getter_AddRefs(mThread), r);
return rv;
}
// This is in usec, so that's 10ms.
static const int64_t kDecoderTimeout = 10000;
-#define BREAK_ON_DECODER_ERROR() \
- if (NS_FAILED(res)) { \
- NS_WARNING("Exiting decoder loop due to exception"); \
- if (mState == ModuleState::kDrainDecoder) { \
- INVOKE_CALLBACK(DrainComplete); \
- SetState(ModuleState::kDecoding); \
- } \
- INVOKE_CALLBACK(Error, MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)); \
- break; \
+#define BREAK_ON_DECODER_ERROR_LOCKED() \
+ if (NS_FAILED(res)) { \
+ mError = true; \
+ mMonitor.AssertCurrentThreadOwns(); \
+ NS_WARNING("Exiting decoder loop due to exception"); \
+ if (mState == ModuleState::kDrainDecoder) { \
+ mDrainPromise.RejectIfExists( \
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__), __func__); \
+ SetState(ModuleState::kDecoding); \
+ break; \
+ } \
+ mDecodePromise.RejectIfExists( \
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__), __func__); \
+ break; \
+ }
+
+#define BREAK_ON_DECODER_ERROR() \
+ if (NS_FAILED(res)) { \
+ mError = true; \
+ MonitorAutoLock mon(mMonitor); \
+ NS_WARNING("Exiting decoder loop due to exception"); \
+ if (mState == ModuleState::kDrainDecoder) { \
+ mDrainPromise.RejectIfExists( \
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__), __func__); \
+ SetState(ModuleState::kDecoding); \
+ break; \
+ } \
+ mDecodePromise.RejectIfExists( \
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__), __func__); \
+ break; \
}
nsresult
MediaCodecDataDecoder::GetInputBuffer(
JNIEnv* aEnv, int aIndex, jni::Object::LocalRef* aBuffer)
{
MOZ_ASSERT(aEnv);
MOZ_ASSERT(!*aBuffer);
@@ -446,19 +377,20 @@ MediaCodecDataDecoder::GetInputBuffer(
return NS_ERROR_FAILURE;
}
bool
MediaCodecDataDecoder::WaitForInput()
{
MonitorAutoLock lock(mMonitor);
- while (mState == ModuleState::kDecoding && mQueue.empty()) {
- // Signal that we require more input.
- INVOKE_CALLBACK(InputExhausted);
+ while (mState == ModuleState::kDecoding && mQueue.empty()) {
+ // We're done processing the current sample.
+ mDecodePromise.ResolveIfExists(mDecodedData, __func__);
+ mDecodedData.Clear();
lock.Wait();
}
return mState != ModuleState::kStopping;
}
already_AddRefed<MediaRawData>
@@ -557,19 +489,20 @@ MediaCodecDataDecoder::QueueEOS()
void
MediaCodecDataDecoder::HandleEOS(int32_t aOutputStatus)
{
MonitorAutoLock lock(mMonitor);
if (mState == ModuleState::kDrainWaitEOS) {
SetState(ModuleState::kDecoding);
+
+ mDrainPromise.ResolveIfExists(mDecodedData, __func__);
+ mDecodedData.Clear();
mMonitor.Notify();
-
- INVOKE_CALLBACK(DrainComplete);
}
mDecoder->ReleaseOutputBuffer(aOutputStatus, false);
}
Maybe<TimeUnit>
MediaCodecDataDecoder::GetOutputDuration()
{
@@ -601,19 +534,17 @@ MediaCodecDataDecoder::ProcessOutput(
if (buffer) {
// The buffer will be null on Android L if we are decoding to a Surface.
void* directBuffer = frame.GetEnv()->GetDirectBufferAddress(buffer.Get());
Output(aInfo, directBuffer, aFormat, duration.value());
}
// The Surface will be updated at this point (for video).
mDecoder->ReleaseOutputBuffer(aStatus, true);
- PostOutput(aInfo, aFormat, duration.value());
-
- return NS_OK;
+ return PostOutput(aInfo, aFormat, duration.value());
}
void
MediaCodecDataDecoder::DecoderLoop()
{
bool isOutputDone = false;
AutoLocalJNIFrame frame(jni::GetEnvForThread(), 1);
MediaFormat::LocalRef outputFormat(frame.GetEnv());
@@ -622,17 +553,17 @@ MediaCodecDataDecoder::DecoderLoop()
while (WaitForInput()) {
RefPtr<MediaRawData> sample = PeekNextSample();
{
MonitorAutoLock lock(mMonitor);
if (mState == ModuleState::kDrainDecoder) {
MOZ_ASSERT(!sample, "Shouldn't have a sample when pushing EOF frame");
res = QueueEOS();
- BREAK_ON_DECODER_ERROR();
+ BREAK_ON_DECODER_ERROR_LOCKED();
}
}
if (sample) {
res = QueueSample(sample);
if (NS_SUCCEEDED(res)) {
// We've fed this into the decoder, so remove it from the queue.
MonitorAutoLock lock(mMonitor);
@@ -651,29 +582,31 @@ MediaCodecDataDecoder::DecoderLoop()
BREAK_ON_DECODER_ERROR();
int32_t outputStatus = -1;
res = mDecoder->DequeueOutputBuffer(bufferInfo, kDecoderTimeout,
&outputStatus);
BREAK_ON_DECODER_ERROR();
if (outputStatus == MediaCodec::INFO_TRY_AGAIN_LATER) {
- // We might want to call mCallback->InputExhausted() here, but there seems
- // to be some possible bad interactions here with the threading.
} else if (outputStatus == MediaCodec::INFO_OUTPUT_BUFFERS_CHANGED) {
res = ResetOutputBuffers();
BREAK_ON_DECODER_ERROR();
} else if (outputStatus == MediaCodec::INFO_OUTPUT_FORMAT_CHANGED) {
res = mDecoder->GetOutputFormat(ReturnTo(&outputFormat));
BREAK_ON_DECODER_ERROR();
} else if (outputStatus < 0) {
NS_WARNING("Unknown error from decoder!");
- INVOKE_CALLBACK(Error,
- MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- __func__));
+ {
+ const auto result =
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__);
+ MonitorAutoLock mon(mMonitor);
+ mDecodePromise.RejectIfExists(result, __func__);
+ mDrainPromise.RejectIfExists(result, __func__);
+ }
// Don't break here just in case it's recoverable. If it's not, other
// stuff will fail later and we'll bail out.
} else {
// We have a valid buffer index >= 0 here.
int32_t flags;
nsresult res = bufferInfo->Flags(&flags);
BREAK_ON_DECODER_ERROR();
@@ -710,16 +643,18 @@ MediaCodecDataDecoder::ModuleStateStr(Mo
default: MOZ_ASSERT_UNREACHABLE("Invalid state.");
}
return "Unknown";
}
bool
MediaCodecDataDecoder::SetState(ModuleState aState)
{
+ mMonitor.AssertCurrentThreadOwns();
+
bool ok = true;
if (mState == ModuleState::kShutdown) {
ok = false;
} else if (mState == ModuleState::kStopping) {
ok = aState == ModuleState::kShutdown;
} else if (aState == ModuleState::kDrainDecoder) {
ok = mState == ModuleState::kDrainQueue;
@@ -739,67 +674,80 @@ MediaCodecDataDecoder::SetState(ModuleSt
void
MediaCodecDataDecoder::ClearQueue()
{
mMonitor.AssertCurrentThreadOwns();
mQueue.clear();
mDurations.clear();
+ mDecodedData.Clear();
}
-void
-MediaCodecDataDecoder::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+MediaCodecDataDecoder::Decode(MediaRawData* aSample)
{
+ if (mError) {
+ return DecodePromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ __func__);
+ }
MonitorAutoLock lock(mMonitor);
+ RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
mQueue.push_back(aSample);
lock.NotifyAll();
+ return p;
}
nsresult
MediaCodecDataDecoder::ResetInputBuffers()
{
return mDecoder->GetInputBuffers(ReturnTo(&mInputBuffers));
}
nsresult
MediaCodecDataDecoder::ResetOutputBuffers()
{
return mDecoder->GetOutputBuffers(ReturnTo(&mOutputBuffers));
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
MediaCodecDataDecoder::Flush()
{
MonitorAutoLock lock(mMonitor);
+ mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+ mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
if (!SetState(ModuleState::kFlushing)) {
- return;
+ return FlushPromise::CreateAndResolve(true, __func__);
}
lock.Notify();
while (mState == ModuleState::kFlushing) {
lock.Wait();
}
+ return FlushPromise::CreateAndResolve(true, __func__);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
MediaCodecDataDecoder::Drain()
{
+ if (mError) {
+ return DecodePromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ __func__);
+ }
MonitorAutoLock lock(mMonitor);
- if (mState == ModuleState::kDrainDecoder ||
- mState == ModuleState::kDrainQueue) {
- return;
- }
+ RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
+ MOZ_ASSERT(mState != ModuleState::kDrainDecoder
+ && mState != ModuleState::kDrainQueue, "Already draining");
SetState(ModuleState::kDrainQueue);
lock.Notify();
+ return p;
}
-
-void
+RefPtr<ShutdownPromise>
MediaCodecDataDecoder::Shutdown()
{
MonitorAutoLock lock(mMonitor);
SetState(ModuleState::kStopping);
lock.Notify();
while (mThread && mState != ModuleState::kShutdown) {
@@ -811,11 +759,13 @@ MediaCodecDataDecoder::Shutdown()
mThread = nullptr;
}
if (mDecoder) {
mDecoder->Stop();
mDecoder->Release();
mDecoder = nullptr;
}
+
+ return ShutdownPromise::CreateAndResolve(true, __func__);
}
} // mozilla
--- a/dom/media/platforms/android/MediaCodecDataDecoder.h
+++ b/dom/media/platforms/android/MediaCodecDataDecoder.h
@@ -5,71 +5,67 @@
#ifndef MediaCodecDataDecoder_h_
#define MediaCodecDataDecoder_h_
#include "AndroidDecoderModule.h"
#include "MediaCodec.h"
#include "SurfaceTexture.h"
#include "TimeUnits.h"
+#include "mozilla/Atomics.h"
#include "mozilla/Monitor.h"
#include "mozilla/Maybe.h"
#include <deque>
namespace mozilla {
typedef std::deque<RefPtr<MediaRawData>> SampleQueue;
-class MediaCodecDataDecoder : public MediaDataDecoder {
+class MediaCodecDataDecoder : public MediaDataDecoder
+{
public:
- static MediaDataDecoder* CreateAudioDecoder(const AudioInfo& aConfig,
- java::sdk::MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
- const nsString& aDrmStubId,
- CDMProxy* aProxy,
- TaskQueue* aTaskQueue);
+ static already_AddRefed<MediaDataDecoder> CreateAudioDecoder(
+ const AudioInfo& aConfig, java::sdk::MediaFormat::Param aFormat,
+ const nsString& aDrmStubId, CDMProxy* aProxy);
- static MediaDataDecoder* CreateVideoDecoder(const VideoInfo& aConfig,
- java::sdk::MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
- layers::ImageContainer* aImageContainer,
- const nsString& aDrmStubId,
- CDMProxy* aProxy,
- TaskQueue* aTaskQueue);
+ static already_AddRefed<MediaDataDecoder> CreateVideoDecoder(
+ const VideoInfo& aConfig, java::sdk::MediaFormat::Param aFormat,
+ layers::ImageContainer* aImageContainer, const nsString& aDrmStubId,
+ CDMProxy* aProxy);
- virtual ~MediaCodecDataDecoder();
+ ~MediaCodecDataDecoder();
- RefPtr<MediaDataDecoder::InitPromise> Init() override;
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
- void Input(MediaRawData* aSample) override;
+ RefPtr<InitPromise> Init() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "Android MediaCodec decoder";
}
protected:
- enum class ModuleState : uint8_t {
+ enum class ModuleState : uint8_t
+ {
kDecoding = 0,
kFlushing,
kDrainQueue,
kDrainDecoder,
kDrainWaitEOS,
kStopping,
kShutdown
};
friend class AndroidDecoderModule;
MediaCodecDataDecoder(MediaData::Type aType,
const nsACString& aMimeType,
java::sdk::MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
const nsString& aDrmStubId);
static const char* ModuleStateStr(ModuleState aState);
virtual nsresult InitDecoder(java::sdk::Surface::Param aSurface);
virtual nsresult Output(java::sdk::BufferInfo::Param aInfo, void* aBuffer,
java::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration)
@@ -94,44 +90,49 @@ protected:
nsresult QueueSample(const MediaRawData* aSample);
nsresult QueueEOS();
void HandleEOS(int32_t aOutputStatus);
Maybe<media::TimeUnit> GetOutputDuration();
nsresult ProcessOutput(java::sdk::BufferInfo::Param aInfo,
java::sdk::MediaFormat::Param aFormat,
int32_t aStatus);
// Sets decoder state and returns whether the new state has become effective.
+ // Must hold the monitor.
bool SetState(ModuleState aState);
void DecoderLoop();
virtual void ClearQueue();
MediaData::Type mType;
nsAutoCString mMimeType;
java::sdk::MediaFormat::GlobalRef mFormat;
- MediaDataDecoderCallback* mCallback;
-
java::sdk::MediaCodec::GlobalRef mDecoder;
jni::ObjectArray::GlobalRef mInputBuffers;
jni::ObjectArray::GlobalRef mOutputBuffers;
nsCOMPtr<nsIThread> mThread;
+ Atomic<bool> mError;
+
// Only these members are protected by mMonitor.
Monitor mMonitor;
ModuleState mState;
SampleQueue mQueue;
// Durations are stored in microseconds.
std::deque<media::TimeUnit> mDurations;
nsString mDrmStubId;
bool mIsCodecSupportAdaptivePlayback = false;
+
+ MozPromiseHolder<DecodePromise> mDecodePromise;
+ MozPromiseHolder<DecodePromise> mDrainPromise;
+ DecodedData mDecodedData;
};
} // namespace mozilla
#endif
--- a/dom/media/platforms/android/RemoteDataDecoder.cpp
+++ b/dom/media/platforms/android/RemoteDataDecoder.cpp
@@ -14,19 +14,19 @@
#include "VPXDecoder.h"
#include "nsThreadUtils.h"
#include "nsPromiseFlatString.h"
#include "nsIGfxInfo.h"
#include "prlog.h"
+#include <deque>
#include <jni.h>
-#include <deque>
#undef LOG
#define LOG(arg, ...) MOZ_LOG(sAndroidDecoderModuleLog, \
mozilla::LogLevel::Debug, ("RemoteDataDecoder(%p)::%s: " arg, \
this, __func__, ##__VA_ARGS__))
using namespace mozilla;
using namespace mozilla::gl;
@@ -38,84 +38,86 @@ namespace mozilla {
class JavaCallbacksSupport
: public CodecProxy::NativeCallbacks::Natives<JavaCallbacksSupport>
{
public:
typedef CodecProxy::NativeCallbacks::Natives<JavaCallbacksSupport> Base;
using Base::AttachNative;
- JavaCallbacksSupport(MediaDataDecoderCallback* aDecoderCallback)
- : mDecoderCallback(aDecoderCallback)
- {
- MOZ_ASSERT(aDecoderCallback);
- }
+ JavaCallbacksSupport() : mCanceled(false) { }
- virtual ~JavaCallbacksSupport() {}
+ virtual ~JavaCallbacksSupport() { }
+
+ virtual void HandleInputExhausted() = 0;
void OnInputExhausted()
{
- if (mDecoderCallback) {
- mDecoderCallback->InputExhausted();
+ if (!mCanceled) {
+ HandleInputExhausted();
}
}
virtual void HandleOutput(Sample::Param aSample) = 0;
void OnOutput(jni::Object::Param aSample)
{
- if (mDecoderCallback) {
+ if (!mCanceled) {
HandleOutput(Sample::Ref::From(aSample));
}
}
- virtual void HandleOutputFormatChanged(MediaFormat::Param aFormat) {};
+ virtual void HandleOutputFormatChanged(MediaFormat::Param aFormat) { };
void OnOutputFormatChanged(jni::Object::Param aFormat)
{
- if (mDecoderCallback) {
+ if (!mCanceled) {
HandleOutputFormatChanged(MediaFormat::Ref::From(aFormat));
}
}
+ virtual void HandleError(const MediaResult& aError) = 0;
+
void OnError(bool aIsFatal)
{
- if (mDecoderCallback) {
- mDecoderCallback->Error(aIsFatal ?
- MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__) :
- MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__));
+ if (!mCanceled) {
+ HandleError(
+ aIsFatal ? MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)
+ : MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__));
}
}
void DisposeNative()
{
// TODO
}
void Cancel()
{
- mDecoderCallback = nullptr;
+ mCanceled = true;
}
-protected:
- MediaDataDecoderCallback* mDecoderCallback;
+private:
+ Atomic<bool> mCanceled;
};
class RemoteVideoDecoder : public RemoteDataDecoder
{
public:
- // Hold an output buffer and render it to the surface when the frame is sent to compositor, or
- // release it if not presented.
+ // Hold an output buffer and render it to the surface when the frame is sent
+ // to compositor, or release it if not presented.
class RenderOrReleaseOutput : public VideoData::Listener
{
public:
- RenderOrReleaseOutput(java::CodecProxy::Param aCodec, java::Sample::Param aSample)
- : mCodec(aCodec),
- mSample(aSample)
- {}
+ RenderOrReleaseOutput(java::CodecProxy::Param aCodec,
+ java::Sample::Param aSample)
+ : mCodec(aCodec)
+ , mSample(aSample)
+ {
+ }
~RenderOrReleaseOutput()
{
ReleaseOutput(false);
}
void OnSentToCompositor() override
{
@@ -134,154 +136,161 @@ public:
java::CodecProxy::GlobalRef mCodec;
java::Sample::GlobalRef mSample;
};
class CallbacksSupport final : public JavaCallbacksSupport
{
public:
- CallbacksSupport(RemoteVideoDecoder* aDecoder, MediaDataDecoderCallback* aCallback)
- : JavaCallbacksSupport(aCallback)
- , mDecoder(aDecoder)
- {}
+ CallbacksSupport(RemoteVideoDecoder* aDecoder) : mDecoder(aDecoder) { }
- virtual ~CallbacksSupport() {}
+ void HandleInputExhausted() override
+ {
+ mDecoder->InputExhausted();
+ }
void HandleOutput(Sample::Param aSample) override
{
Maybe<int64_t> durationUs = mDecoder->mInputDurations.Get();
if (!durationUs) {
return;
}
BufferInfo::LocalRef info = aSample->Info();
int32_t flags;
bool ok = NS_SUCCEEDED(info->Flags(&flags));
- MOZ_ASSERT(ok);
int32_t offset;
- ok |= NS_SUCCEEDED(info->Offset(&offset));
- MOZ_ASSERT(ok);
+ ok &= NS_SUCCEEDED(info->Offset(&offset));
int64_t presentationTimeUs;
- ok |= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
- MOZ_ASSERT(ok);
+ ok &= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
int32_t size;
- ok |= NS_SUCCEEDED(info->Size(&size));
- MOZ_ASSERT(ok);
+ ok &= NS_SUCCEEDED(info->Size(&size));
- NS_ENSURE_TRUE_VOID(ok);
+ if (!ok) {
+ HandleError(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("VideoCallBack::HandleOutput")));
+ return;
+ }
if (size > 0) {
- RefPtr<layers::Image> img =
- new SurfaceTextureImage(mDecoder->mSurfaceTexture.get(), mDecoder->mConfig.mDisplay,
- gl::OriginPos::BottomLeft);
+ RefPtr<layers::Image> img = new SurfaceTextureImage(
+ mDecoder->mSurfaceTexture.get(), mDecoder->mConfig.mDisplay,
+ gl::OriginPos::BottomLeft);
- RefPtr<VideoData> v =
- VideoData::CreateFromImage(mDecoder->mConfig,
- offset,
- presentationTimeUs,
- durationUs.value(),
- img,
- !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME),
- presentationTimeUs,
- gfx::IntRect(0, 0,
- mDecoder->mConfig.mDisplay.width,
- mDecoder->mConfig.mDisplay.height));
+ RefPtr<VideoData> v = VideoData::CreateFromImage(
+ mDecoder->mConfig, offset, presentationTimeUs, durationUs.value(),
+ img, !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME),
+ presentationTimeUs,
+ gfx::IntRect(0, 0, mDecoder->mConfig.mDisplay.width,
+ mDecoder->mConfig.mDisplay.height));
- UniquePtr<VideoData::Listener> listener(new RenderOrReleaseOutput(mDecoder->mJavaDecoder, aSample));
+ UniquePtr<VideoData::Listener> listener(
+ new RenderOrReleaseOutput(mDecoder->mJavaDecoder, aSample));
v->SetListener(Move(listener));
- mDecoderCallback->Output(v);
+ mDecoder->Output(v);
}
if ((flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) != 0) {
- mDecoderCallback->DrainComplete();
+ mDecoder->DrainComplete();
}
}
+ void HandleError(const MediaResult& aError) override
+ {
+ mDecoder->Error(aError);
+ }
+
friend class RemoteDataDecoder;
private:
RemoteVideoDecoder* mDecoder;
};
RemoteVideoDecoder(const VideoInfo& aConfig,
MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer,
- const nsString& aDrmStubId)
+ const nsString& aDrmStubId, TaskQueue* aTaskQueue)
: RemoteDataDecoder(MediaData::Type::VIDEO_DATA, aConfig.mMimeType,
- aFormat, aCallback, aDrmStubId)
+ aFormat, aDrmStubId, aTaskQueue)
, mImageContainer(aImageContainer)
, mConfig(aConfig)
{
}
RefPtr<InitPromise> Init() override
{
mSurfaceTexture = AndroidSurfaceTexture::Create();
if (!mSurfaceTexture) {
NS_WARNING("Failed to create SurfaceTexture for video decode\n");
- return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ __func__);
}
if (!jni::IsFennec()) {
NS_WARNING("Remote decoding not supported in non-Fennec environment\n");
- return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ __func__);
}
// Register native methods.
JavaCallbacksSupport::Init();
mJavaCallbacks = CodecProxy::NativeCallbacks::New();
- JavaCallbacksSupport::AttachNative(mJavaCallbacks,
- mozilla::MakeUnique<CallbacksSupport>(this, mCallback));
+ JavaCallbacksSupport::AttachNative(
+ mJavaCallbacks, mozilla::MakeUnique<CallbacksSupport>(this));
mJavaDecoder = CodecProxy::Create(mFormat,
mSurfaceTexture->JavaSurface(),
mJavaCallbacks,
mDrmStubId);
if (mJavaDecoder == nullptr) {
- return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ __func__);
}
- mIsCodecSupportAdaptivePlayback = mJavaDecoder->IsAdaptivePlaybackSupported();
- mInputDurations.Clear();
+ mIsCodecSupportAdaptivePlayback =
+ mJavaDecoder->IsAdaptivePlaybackSupported();
return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
}
- void Flush() override
+ RefPtr<MediaDataDecoder::FlushPromise> Flush() override
{
mInputDurations.Clear();
- RemoteDataDecoder::Flush();
+ return RemoteDataDecoder::Flush();
}
- void Drain() override
+ RefPtr<MediaDataDecoder::DecodePromise> Drain() override
{
- RemoteDataDecoder::Drain();
mInputDurations.Put(0);
+ return RemoteDataDecoder::Drain();
}
- void Input(MediaRawData* aSample) override
+ RefPtr<MediaDataDecoder::DecodePromise> Decode(MediaRawData* aSample) override
{
- RemoteDataDecoder::Input(aSample);
mInputDurations.Put(aSample->mDuration);
+ return RemoteDataDecoder::Decode(aSample);
}
- bool SupportDecoderRecycling() const override { return mIsCodecSupportAdaptivePlayback; }
+ bool SupportDecoderRecycling() const override
+ {
+ return mIsCodecSupportAdaptivePlayback;
+ }
private:
- class DurationQueue {
+ class DurationQueue
+ {
public:
- DurationQueue() : mMutex("Video duration queue") {}
+ DurationQueue() : mMutex("Video duration queue") { }
void Clear()
{
MutexAutoLock lock(mMutex);
mValues.clear();
}
void Put(int64_t aDurationUs)
@@ -304,331 +313,345 @@ private:
}
private:
Mutex mMutex; // To protect mValues.
std::deque<int64_t> mValues;
};
layers::ImageContainer* mImageContainer;
- const VideoInfo& mConfig;
+ const VideoInfo mConfig;
RefPtr<AndroidSurfaceTexture> mSurfaceTexture;
DurationQueue mInputDurations;
bool mIsCodecSupportAdaptivePlayback = false;
};
-class RemoteEMEVideoDecoder : public RemoteVideoDecoder {
-public:
- RemoteEMEVideoDecoder(const VideoInfo& aConfig,
- MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
- layers::ImageContainer* aImageContainer,
- const nsString& aDrmStubId,
- CDMProxy* aProxy,
- TaskQueue* aTaskQueue)
- : RemoteVideoDecoder(aConfig, aFormat, aCallback, aImageContainer, aDrmStubId)
- , mSamplesWaitingForKey(new SamplesWaitingForKey(this, aCallback,
- aTaskQueue, aProxy))
- {
- }
-
- void Input(MediaRawData* aSample) override;
- void Shutdown() override;
-
-private:
- RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
-};
-
-void
-RemoteEMEVideoDecoder::Input(MediaRawData* aSample)
-{
- if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
- return;
- }
- RemoteVideoDecoder::Input(aSample);
-}
-
-void
-RemoteEMEVideoDecoder::Shutdown()
-{
- RemoteVideoDecoder::Shutdown();
-
- mSamplesWaitingForKey->BreakCycles();
- mSamplesWaitingForKey = nullptr;
-}
-
class RemoteAudioDecoder : public RemoteDataDecoder
{
public:
RemoteAudioDecoder(const AudioInfo& aConfig,
MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
- const nsString& aDrmStubId)
+ const nsString& aDrmStubId, TaskQueue* aTaskQueue)
: RemoteDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mMimeType,
- aFormat, aCallback, aDrmStubId)
+ aFormat, aDrmStubId, aTaskQueue)
, mConfig(aConfig)
{
JNIEnv* const env = jni::GetEnvForThread();
bool formatHasCSD = false;
- NS_ENSURE_SUCCESS_VOID(aFormat->ContainsKey(NS_LITERAL_STRING("csd-0"), &formatHasCSD));
+ NS_ENSURE_SUCCESS_VOID(
+ aFormat->ContainsKey(NS_LITERAL_STRING("csd-0"), &formatHasCSD));
if (!formatHasCSD && aConfig.mCodecSpecificConfig->Length() >= 2) {
jni::ByteBuffer::LocalRef buffer(env);
- buffer = jni::ByteBuffer::New(
- aConfig.mCodecSpecificConfig->Elements(),
+ buffer = jni::ByteBuffer::New(aConfig.mCodecSpecificConfig->Elements(),
aConfig.mCodecSpecificConfig->Length());
- NS_ENSURE_SUCCESS_VOID(aFormat->SetByteBuffer(NS_LITERAL_STRING("csd-0"),
- buffer));
+ NS_ENSURE_SUCCESS_VOID(
+ aFormat->SetByteBuffer(NS_LITERAL_STRING("csd-0"), buffer));
}
}
RefPtr<InitPromise> Init() override
{
// Register native methods.
JavaCallbacksSupport::Init();
mJavaCallbacks = CodecProxy::NativeCallbacks::New();
- JavaCallbacksSupport::AttachNative(mJavaCallbacks,
- mozilla::MakeUnique<CallbacksSupport>(this, mCallback));
+ JavaCallbacksSupport::AttachNative(
+ mJavaCallbacks, mozilla::MakeUnique<CallbacksSupport>(this));
- mJavaDecoder = CodecProxy::Create(mFormat, nullptr, mJavaCallbacks, mDrmStubId);
+ mJavaDecoder =
+ CodecProxy::Create(mFormat, nullptr, mJavaCallbacks, mDrmStubId);
if (mJavaDecoder == nullptr) {
- return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ __func__);
}
return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
}
private:
class CallbacksSupport final : public JavaCallbacksSupport
{
public:
- CallbacksSupport(RemoteAudioDecoder* aDecoder, MediaDataDecoderCallback* aCallback)
- : JavaCallbacksSupport(aCallback)
- , mDecoder(aDecoder)
- {}
+ CallbacksSupport(RemoteAudioDecoder* aDecoder) : mDecoder(aDecoder) { }
- virtual ~CallbacksSupport() {}
+ void HandleInputExhausted() override
+ {
+ mDecoder->InputExhausted();
+ }
void HandleOutput(Sample::Param aSample) override
{
BufferInfo::LocalRef info = aSample->Info();
int32_t flags;
bool ok = NS_SUCCEEDED(info->Flags(&flags));
- MOZ_ASSERT(ok);
int32_t offset;
- ok |= NS_SUCCEEDED(info->Offset(&offset));
- MOZ_ASSERT(ok);
+ ok &= NS_SUCCEEDED(info->Offset(&offset));
int64_t presentationTimeUs;
- ok |= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
- MOZ_ASSERT(ok);
+ ok &= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
int32_t size;
- ok |= NS_SUCCEEDED(info->Size(&size));
- MOZ_ASSERT(ok);
+ ok &= NS_SUCCEEDED(info->Size(&size));
- NS_ENSURE_TRUE_VOID(ok);
+ if (!ok) {
+ HandleError(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("AudioCallBack::HandleOutput")));
+ return;
+ }
if (size > 0) {
#ifdef MOZ_SAMPLE_TYPE_S16
const int32_t numSamples = size / 2;
#else
#error We only support 16-bit integer PCM
#endif
const int32_t numFrames = numSamples / mOutputChannels;
AlignedAudioBuffer audio(numSamples);
if (!audio) {
+ mDecoder->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
return;
}
- jni::ByteBuffer::LocalRef dest = jni::ByteBuffer::New(audio.get(), size);
+ jni::ByteBuffer::LocalRef dest =
+ jni::ByteBuffer::New(audio.get(), size);
aSample->WriteToByteBuffer(dest);
- RefPtr<AudioData> data = new AudioData(0, presentationTimeUs,
- FramesToUsecs(numFrames, mOutputSampleRate).value(),
- numFrames,
- Move(audio),
- mOutputChannels,
- mOutputSampleRate);
+ RefPtr<AudioData> data = new AudioData(
+ 0, presentationTimeUs,
+ FramesToUsecs(numFrames, mOutputSampleRate).value(), numFrames,
+ Move(audio), mOutputChannels, mOutputSampleRate);
- mDecoderCallback->Output(data);
+ mDecoder->Output(data);
}
if ((flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) != 0) {
- mDecoderCallback->DrainComplete();
- return;
+ mDecoder->DrainComplete();
}
}
void HandleOutputFormatChanged(MediaFormat::Param aFormat) override
{
aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &mOutputChannels);
AudioConfig::ChannelLayout layout(mOutputChannels);
if (!layout.IsValid()) {
- mDecoderCallback->Error(MediaResult(
+ mDecoder->Error(MediaResult(
NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("Invalid channel layout:%d", mOutputChannels)));
return;
}
aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &mOutputSampleRate);
- LOG("Audio output format changed: channels:%d sample rate:%d", mOutputChannels, mOutputSampleRate);
+ LOG("Audio output format changed: channels:%d sample rate:%d",
+ mOutputChannels, mOutputSampleRate);
+ }
+
+ void HandleError(const MediaResult& aError) override
+ {
+ mDecoder->Error(aError);
}
private:
RemoteAudioDecoder* mDecoder;
int32_t mOutputChannels;
int32_t mOutputSampleRate;
};
- const AudioInfo& mConfig;
+ const AudioInfo mConfig;
};
-class RemoteEMEAudioDecoder : public RemoteAudioDecoder {
-public:
- RemoteEMEAudioDecoder(const AudioInfo& aConfig, MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback, const nsString& aDrmStubId,
- CDMProxy* aProxy, TaskQueue* aTaskQueue)
- : RemoteAudioDecoder(aConfig, aFormat, aCallback, aDrmStubId)
- , mSamplesWaitingForKey(new SamplesWaitingForKey(this, aCallback,
- aTaskQueue, aProxy))
- {
+already_AddRefed<MediaDataDecoder>
+RemoteDataDecoder::CreateAudioDecoder(const AudioInfo& aConfig,
+ MediaFormat::Param aFormat,
+ const nsString& aDrmStubId,
+ CDMProxy* aProxy, TaskQueue* aTaskQueue)
+{
+ RefPtr<MediaDataDecoder> decoder;
+ if (!aProxy) {
+ decoder = new RemoteAudioDecoder(aConfig, aFormat, aDrmStubId, aTaskQueue);
+ } else {
+ // TODO in bug 1334061.
}
-
- void Input(MediaRawData* aSample) override;
- void Shutdown() override;
-
-private:
- RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
-};
-
-void
-RemoteEMEAudioDecoder::Input(MediaRawData* aSample)
-{
- if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
- return;
- }
- RemoteAudioDecoder::Input(aSample);
-}
-
-void
-RemoteEMEAudioDecoder::Shutdown()
-{
- RemoteAudioDecoder::Shutdown();
-
- mSamplesWaitingForKey->BreakCycles();
- mSamplesWaitingForKey = nullptr;
+ return decoder.forget();
}
-MediaDataDecoder*
-RemoteDataDecoder::CreateAudioDecoder(const AudioInfo& aConfig,
- MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
- const nsString& aDrmStubId,
- CDMProxy* aProxy,
- TaskQueue* aTaskQueue)
-{
- if (!aProxy) {
- return new RemoteAudioDecoder(aConfig, aFormat, aCallback, aDrmStubId);
- } else {
- return new RemoteEMEAudioDecoder(aConfig,
- aFormat,
- aCallback,
- aDrmStubId,
- aProxy,
- aTaskQueue);
- }
-}
-
-MediaDataDecoder*
+already_AddRefed<MediaDataDecoder>
RemoteDataDecoder::CreateVideoDecoder(const VideoInfo& aConfig,
MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer,
const nsString& aDrmStubId,
- CDMProxy* aProxy,
- TaskQueue* aTaskQueue)
+ CDMProxy* aProxy, TaskQueue* aTaskQueue)
{
+ RefPtr<MediaDataDecoder> decoder;
if (!aProxy) {
- return new RemoteVideoDecoder(aConfig, aFormat, aCallback, aImageContainer, aDrmStubId);
+ decoder = new RemoteVideoDecoder(aConfig, aFormat, aImageContainer,
+ aDrmStubId, aTaskQueue);
} else {
- return new RemoteEMEVideoDecoder(aConfig,
- aFormat,
- aCallback,
- aImageContainer,
- aDrmStubId,
- aProxy,
- aTaskQueue);
+ // TODO in bug 1334061.
}
+ return decoder.forget();
}
RemoteDataDecoder::RemoteDataDecoder(MediaData::Type aType,
const nsACString& aMimeType,
MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
- const nsString& aDrmStubId)
+ const nsString& aDrmStubId,
+ TaskQueue* aTaskQueue)
: mType(aType)
, mMimeType(aMimeType)
, mFormat(aFormat)
- , mCallback(aCallback)
, mDrmStubId(aDrmStubId)
+ , mTaskQueue(aTaskQueue)
{
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
RemoteDataDecoder::Flush()
{
- mJavaDecoder->Flush();
+ RefPtr<RemoteDataDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self, this]() {
+ mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+ mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+ mJavaDecoder->Flush();
+ return FlushPromise::CreateAndResolve(true, __func__);
+ });
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
RemoteDataDecoder::Drain()
{
- BufferInfo::LocalRef bufferInfo;
- nsresult rv = BufferInfo::New(&bufferInfo);
- NS_ENSURE_SUCCESS_VOID(rv);
- bufferInfo->Set(0, 0, -1, MediaCodec::BUFFER_FLAG_END_OF_STREAM);
+ RefPtr<RemoteDataDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self, this]() {
+ BufferInfo::LocalRef bufferInfo;
+ nsresult rv = BufferInfo::New(&bufferInfo);
+ if (NS_FAILED(rv)) {
+ return DecodePromise::CreateAndReject(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+ bufferInfo->Set(0, 0, -1, MediaCodec::BUFFER_FLAG_END_OF_STREAM);
- mJavaDecoder->Input(nullptr, bufferInfo, nullptr);
+ RefPtr<DecodePromise> p = mDrainPromise.Ensure(__func__);
+ mJavaDecoder->Input(nullptr, bufferInfo, nullptr);
+ return p;
+ });
}
-void
+RefPtr<ShutdownPromise>
RemoteDataDecoder::Shutdown()
{
LOG("");
+ RefPtr<RemoteDataDecoder> self = this;
+ return InvokeAsync(mTaskQueue, this, __func__,
+ &RemoteDataDecoder::ProcessShutdown);
+}
+RefPtr<ShutdownPromise>
+RemoteDataDecoder::ProcessShutdown()
+{
+ AssertOnTaskQueue();
+ mShutdown = true;
if (mJavaDecoder) {
mJavaDecoder->Release();
mJavaDecoder = nullptr;
}
if (mJavaCallbacks) {
JavaCallbacksSupport::GetNative(mJavaCallbacks)->Cancel();
mJavaCallbacks = nullptr;
}
mFormat = nullptr;
+
+ return ShutdownPromise::CreateAndResolve(true, __func__);
}
-void
-RemoteDataDecoder::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+RemoteDataDecoder::Decode(MediaRawData* aSample)
{
MOZ_ASSERT(aSample != nullptr);
- jni::ByteBuffer::LocalRef bytes = jni::ByteBuffer::New(const_cast<uint8_t*>(aSample->Data()),
- aSample->Size());
+ RefPtr<RemoteDataDecoder> self = this;
+ RefPtr<MediaRawData> sample = aSample;
+ return InvokeAsync(mTaskQueue, __func__, [self, sample, this]() {
+ jni::ByteBuffer::LocalRef bytes = jni::ByteBuffer::New(
+ const_cast<uint8_t*>(sample->Data()), sample->Size());
+
+ BufferInfo::LocalRef bufferInfo;
+ nsresult rv = BufferInfo::New(&bufferInfo);
+ if (NS_FAILED(rv)) {
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
+ }
+ bufferInfo->Set(0, sample->Size(), sample->mTime, 0);
- BufferInfo::LocalRef bufferInfo;
- nsresult rv = BufferInfo::New(&bufferInfo);
- if (NS_FAILED(rv)) {
- mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
+ RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
+ mJavaDecoder->Input(bytes, bufferInfo, GetCryptoInfoFromSample(sample));
+ return p;
+ });
+}
+
+void
+RemoteDataDecoder::Output(MediaData* aSample)
+{
+ if (!mTaskQueue->IsCurrentThreadIn()) {
+ mTaskQueue->Dispatch(
+ NewRunnableMethod<MediaData*>(this, &RemoteDataDecoder::Output, aSample));
+ return;
+ }
+ AssertOnTaskQueue();
+ if (mShutdown) {
return;
}
- bufferInfo->Set(0, aSample->Size(), aSample->mTime, 0);
+ mDecodedData.AppendElement(aSample);
+}
+
+void
+RemoteDataDecoder::InputExhausted()
+{
+ if (!mTaskQueue->IsCurrentThreadIn()) {
+ mTaskQueue->Dispatch(
+ NewRunnableMethod(this, &RemoteDataDecoder::InputExhausted));
+ return;
+ }
+ AssertOnTaskQueue();
+ if (mShutdown) {
+ return;
+ }
+ mDecodePromise.ResolveIfExists(mDecodedData, __func__);
+ mDecodedData.Clear();
+}
- mJavaDecoder->Input(bytes, bufferInfo, GetCryptoInfoFromSample(aSample));
+void
+RemoteDataDecoder::DrainComplete()
+{
+ if (!mTaskQueue->IsCurrentThreadIn()) {
+ mTaskQueue->Dispatch(
+ NewRunnableMethod(this, &RemoteDataDecoder::DrainComplete));
+ return;
+ }
+ AssertOnTaskQueue();
+ if (mShutdown) {
+ return;
+ }
+ mDrainPromise.ResolveIfExists(mDecodedData, __func__);
+ mDecodedData.Clear();
+}
+
+void
+RemoteDataDecoder::Error(const MediaResult& aError)
+{
+ if (!mTaskQueue->IsCurrentThreadIn()) {
+ mTaskQueue->Dispatch(
+ NewRunnableMethod<MediaResult>(this, &RemoteDataDecoder::Error, aError));
+ return;
+ }
+ AssertOnTaskQueue();
+ if (mShutdown) {
+ return;
+ }
+ mDecodePromise.RejectIfExists(aError, __func__);
+ mDrainPromise.RejectIfExists(aError, __func__);
+ mDecodedData.Clear();
}
} // mozilla
--- a/dom/media/platforms/android/RemoteDataDecoder.h
+++ b/dom/media/platforms/android/RemoteDataDecoder.h
@@ -9,62 +9,70 @@
#include "FennecJNIWrappers.h"
#include "SurfaceTexture.h"
#include "TimeUnits.h"
#include "mozilla/Monitor.h"
#include "mozilla/Maybe.h"
-#include <deque>
-
namespace mozilla {
-class RemoteDataDecoder : public MediaDataDecoder {
+class RemoteDataDecoder : public MediaDataDecoder
+{
public:
- static MediaDataDecoder* CreateAudioDecoder(const AudioInfo& aConfig,
- java::sdk::MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
- const nsString& aDrmStubId,
- CDMProxy* aProxy,
- TaskQueue* aTaskQueue);
+ static already_AddRefed<MediaDataDecoder> CreateAudioDecoder(
+ const AudioInfo& aConfig, java::sdk::MediaFormat::Param aFormat,
+ const nsString& aDrmStubId, CDMProxy* aProxy, TaskQueue* aTaskQueue);
- static MediaDataDecoder* CreateVideoDecoder(const VideoInfo& aConfig,
- java::sdk::MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
- layers::ImageContainer* aImageContainer,
- const nsString& aDrmStubId,
- CDMProxy* aProxy,
- TaskQueue* aTaskQueue);
+ static already_AddRefed<MediaDataDecoder> CreateVideoDecoder(
+ const VideoInfo& aConfig, java::sdk::MediaFormat::Param aFormat,
+ layers::ImageContainer* aImageContainer, const nsString& aDrmStubId,
+ CDMProxy* aProxy, TaskQueue* aTaskQueue);
- virtual ~RemoteDataDecoder() {}
+ virtual ~RemoteDataDecoder() { }
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
- void Input(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "android remote decoder";
}
protected:
RemoteDataDecoder(MediaData::Type aType,
const nsACString& aMimeType,
java::sdk::MediaFormat::Param aFormat,
- MediaDataDecoderCallback* aCallback,
- const nsString& aDrmStubId);
+ const nsString& aDrmStubId, TaskQueue* aTaskQueue);
+
+ // Methods only called on mTaskQueue.
+ RefPtr<ShutdownPromise> ProcessShutdown();
+ void Output(MediaData* aSample);
+ void InputExhausted();
+ void DrainComplete();
+ void Error(const MediaResult& aError);
+ void AssertOnTaskQueue()
+ {
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ }
MediaData::Type mType;
nsAutoCString mMimeType;
java::sdk::MediaFormat::GlobalRef mFormat;
- MediaDataDecoderCallback* mCallback;
-
java::CodecProxy::GlobalRef mJavaDecoder;
java::CodecProxy::NativeCallbacks::GlobalRef mJavaCallbacks;
nsString mDrmStubId;
+
+ RefPtr<TaskQueue> mTaskQueue;
+  // Only ever accessed on mTaskQueue.
+ bool mShutdown = false;
+ MozPromiseHolder<DecodePromise> mDecodePromise;
+ MozPromiseHolder<DecodePromise> mDrainPromise;
+ DecodedData mDecodedData;
};
} // namespace mozilla
#endif
--- a/dom/media/platforms/apple/AppleATDecoder.cpp
+++ b/dom/media/platforms/apple/AppleATDecoder.cpp
@@ -14,25 +14,22 @@
#include "mozilla/UniquePtr.h"
#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
#define FourCC2Str(n) ((char[5]){(char)(n >> 24), (char)(n >> 16), (char)(n >> 8), (char)(n), 0})
namespace mozilla {
AppleATDecoder::AppleATDecoder(const AudioInfo& aConfig,
- TaskQueue* aTaskQueue,
- MediaDataDecoderCallback* aCallback)
+ TaskQueue* aTaskQueue)
: mConfig(aConfig)
, mFileStreamError(false)
, mTaskQueue(aTaskQueue)
- , mCallback(aCallback)
, mConverter(nullptr)
, mStream(nullptr)
- , mIsFlushing(false)
, mParsedFramesForAACMagicCookie(0)
, mErrored(false)
{
MOZ_COUNT_CTOR(AppleATDecoder);
LOG("Creating Apple AudioToolbox decoder");
LOG("Audio Decoder configuration: %s %d Hz %d channels %d bits per channel",
mConfig.mMimeType.get(),
mConfig.mRate,
@@ -60,84 +57,76 @@ AppleATDecoder::Init()
if (!mFormatID) {
NS_ERROR("Non recognised format");
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__);
}
-void
-AppleATDecoder::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+AppleATDecoder::Decode(MediaRawData* aSample)
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio",
- aSample,
- aSample->mDuration,
- aSample->mTime,
- aSample->mKeyframe ? " keyframe" : "",
+ LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio", aSample,
+ aSample->mDuration, aSample->mTime, aSample->mKeyframe ? " keyframe" : "",
(unsigned long long)aSample->Size());
-
- // Queue a task to perform the actual decoding on a separate thread.
- nsCOMPtr<nsIRunnable> runnable =
- NewRunnableMethod<RefPtr<MediaRawData>>(
- this,
- &AppleATDecoder::SubmitSample,
- RefPtr<MediaRawData>(aSample));
- mTaskQueue->Dispatch(runnable.forget());
+ RefPtr<AppleATDecoder> self = this;
+ RefPtr<MediaRawData> sample = aSample;
+ return InvokeAsync(mTaskQueue, __func__, [self, this, sample] {
+ return ProcessDecode(sample);
+ });
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
AppleATDecoder::ProcessFlush()
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
mQueuedSamples.Clear();
+ mDecodedSamples.Clear();
+
if (mConverter) {
OSStatus rv = AudioConverterReset(mConverter);
if (rv) {
LOG("Error %d resetting AudioConverter", rv);
}
}
if (mErrored) {
mParsedFramesForAACMagicCookie = 0;
mMagicCookie.Clear();
ProcessShutdown();
mErrored = false;
}
+ return FlushPromise::CreateAndResolve(true, __func__);
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
AppleATDecoder::Flush()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
LOG("Flushing AudioToolbox AAC decoder");
- mIsFlushing = true;
- nsCOMPtr<nsIRunnable> runnable =
- NewRunnableMethod(this, &AppleATDecoder::ProcessFlush);
- SyncRunnable::DispatchToThread(mTaskQueue, runnable);
- mIsFlushing = false;
+ return InvokeAsync(mTaskQueue, this, __func__, &AppleATDecoder::ProcessFlush);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
AppleATDecoder::Drain()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
LOG("Draining AudioToolbox AAC decoder");
- mTaskQueue->AwaitIdle();
- mCallback->DrainComplete();
- Flush();
+ RefPtr<AppleATDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [] {
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+ });
}
-void
+RefPtr<ShutdownPromise>
AppleATDecoder::Shutdown()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- nsCOMPtr<nsIRunnable> runnable =
- NewRunnableMethod(this, &AppleATDecoder::ProcessShutdown);
- SyncRunnable::DispatchToThread(mTaskQueue, runnable);
+ RefPtr<AppleATDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self, this]() {
+ ProcessShutdown();
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+ });
}
void
AppleATDecoder::ProcessShutdown()
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
if (mStream) {
@@ -195,48 +184,43 @@ static OSStatus
aData->mBuffers[0].mData = const_cast<void*>(userData->mData);
// No more data to provide following this run.
userData->mDataSize = 0;
return noErr;
}
-void
-AppleATDecoder::SubmitSample(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+AppleATDecoder::ProcessDecode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
- if (mIsFlushing) {
- return;
- }
-
MediaResult rv = NS_OK;
if (!mConverter) {
rv = SetupDecoder(aSample);
if (rv != NS_OK && rv != NS_ERROR_NOT_INITIALIZED) {
- mCallback->Error(rv);
- return;
+ return DecodePromise::CreateAndReject(rv, __func__);
}
}
mQueuedSamples.AppendElement(aSample);
if (rv == NS_OK) {
for (size_t i = 0; i < mQueuedSamples.Length(); i++) {
rv = DecodeSample(mQueuedSamples[i]);
if (NS_FAILED(rv)) {
mErrored = true;
- mCallback->Error(rv);
- return;
+ return DecodePromise::CreateAndReject(rv, __func__);
}
}
mQueuedSamples.Clear();
}
- mCallback->InputExhausted();
+
+ return DecodePromise::CreateAndResolve(Move(mDecodedSamples), __func__);
}
MediaResult
AppleATDecoder::DecodeSample(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
// Array containing the queued decoded audio frames, about to be output.
@@ -338,17 +322,17 @@ AppleATDecoder::DecodeSample(MediaRawDat
RefPtr<AudioData> audio = new AudioData(aSample->mOffset,
aSample->mTime,
duration.ToMicroseconds(),
numFrames,
data.Forget(),
channels,
rate);
- mCallback->Output(audio);
+ mDecodedSamples.AppendElement(Move(audio));
return NS_OK;
}
MediaResult
AppleATDecoder::GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
const nsTArray<uint8_t>& aExtraData)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
@@ -491,18 +475,18 @@ AppleATDecoder::SetupChannelLayout()
// directly contains the the channel layout mapping.
// If tag is kAudioChannelLayoutTag_UseChannelBitmap then the layout will
// be defined via the bitmap and can be retrieved using
// kAudioFormatProperty_ChannelLayoutForBitmap property.
// Otherwise the tag itself describes the layout.
if (tag != kAudioChannelLayoutTag_UseChannelDescriptions) {
AudioFormatPropertyID property =
tag == kAudioChannelLayoutTag_UseChannelBitmap
- ? kAudioFormatProperty_ChannelLayoutForBitmap
- : kAudioFormatProperty_ChannelLayoutForTag;
+ ? kAudioFormatProperty_ChannelLayoutForBitmap
+ : kAudioFormatProperty_ChannelLayoutForTag;
if (property == kAudioFormatProperty_ChannelLayoutForBitmap) {
status =
AudioFormatGetPropertyInfo(property,
sizeof(UInt32), &layout->mChannelBitmap,
&propertySize);
} else {
status =
@@ -627,16 +611,18 @@ AppleATDecoder::SetupDecoder(MediaRawDat
static void
_MetadataCallback(void* aAppleATDecoder,
AudioFileStreamID aStream,
AudioFileStreamPropertyID aProperty,
UInt32* aFlags)
{
AppleATDecoder* decoder = static_cast<AppleATDecoder*>(aAppleATDecoder);
+ MOZ_RELEASE_ASSERT(decoder->mTaskQueue->IsCurrentThreadIn());
+
LOG("MetadataCallback receiving: '%s'", FourCC2Str(aProperty));
if (aProperty == kAudioFileStreamProperty_MagicCookieData) {
UInt32 size;
Boolean writeable;
OSStatus rv = AudioFileStreamGetPropertyInfo(aStream,
aProperty,
&size,
&writeable);
--- a/dom/media/platforms/apple/AppleATDecoder.h
+++ b/dom/media/platforms/apple/AppleATDecoder.h
@@ -4,68 +4,65 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_AppleATDecoder_h
#define mozilla_AppleATDecoder_h
#include <AudioToolbox/AudioToolbox.h>
#include "PlatformDecoderModule.h"
-#include "mozilla/ReentrantMonitor.h"
#include "mozilla/Vector.h"
#include "nsIThread.h"
#include "AudioConverter.h"
namespace mozilla {
class TaskQueue;
-class MediaDataDecoderCallback;
class AppleATDecoder : public MediaDataDecoder {
public:
AppleATDecoder(const AudioInfo& aConfig,
- TaskQueue* aTaskQueue,
- MediaDataDecoderCallback* aCallback);
- virtual ~AppleATDecoder();
+ TaskQueue* aTaskQueue);
+ ~AppleATDecoder();
RefPtr<InitPromise> Init() override;
- void Input(MediaRawData* aSample) override;
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "apple CoreMedia decoder";
}
// Callbacks also need access to the config.
const AudioInfo& mConfig;
// Use to extract magic cookie for HE-AAC detection.
nsTArray<uint8_t> mMagicCookie;
// Will be set to true should an error occurred while attempting to retrieve
// the magic cookie property.
bool mFileStreamError;
+ const RefPtr<TaskQueue> mTaskQueue;
+
private:
- const RefPtr<TaskQueue> mTaskQueue;
- MediaDataDecoderCallback* mCallback;
AudioConverterRef mConverter;
AudioStreamBasicDescription mOutputFormat;
UInt32 mFormatID;
AudioFileStreamID mStream;
nsTArray<RefPtr<MediaRawData>> mQueuedSamples;
UniquePtr<AudioConfig::ChannelLayout> mChannelLayout;
UniquePtr<AudioConverter> mAudioConverter;
- Atomic<bool> mIsFlushing;
+ DecodedData mDecodedSamples;
- void ProcessFlush();
+ RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
+ RefPtr<FlushPromise> ProcessFlush();
void ProcessShutdown();
- void SubmitSample(MediaRawData* aSample);
MediaResult DecodeSample(MediaRawData* aSample);
MediaResult GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
const nsTArray<uint8_t>& aExtraData);
// Setup AudioConverter once all information required has been gathered.
// Will return NS_ERROR_NOT_INITIALIZED if more data is required.
MediaResult SetupDecoder(MediaRawData* aSample);
nsresult GetImplicitAACMagicCookie(const MediaRawData* aSample);
nsresult SetupChannelLayout();
--- a/dom/media/platforms/apple/AppleDecoderModule.cpp
+++ b/dom/media/platforms/apple/AppleDecoderModule.cpp
@@ -69,28 +69,25 @@ AppleDecoderModule::Startup()
}
already_AddRefed<MediaDataDecoder>
AppleDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
{
RefPtr<MediaDataDecoder> decoder =
new AppleVTDecoder(aParams.VideoConfig(),
aParams.mTaskQueue,
- aParams.mCallback,
aParams.mImageContainer);
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
AppleDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
{
RefPtr<MediaDataDecoder> decoder =
- new AppleATDecoder(aParams.AudioConfig(),
- aParams.mTaskQueue,
- aParams.mCallback);
+ new AppleATDecoder(aParams.AudioConfig(), aParams.mTaskQueue);
return decoder.forget();
}
bool
AppleDecoderModule::SupportsMimeType(const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const
{
return (sIsCoreMediaAvailable &&
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -21,45 +21,43 @@
#include "gfxPlatform.h"
#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
namespace mozilla {
AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig,
TaskQueue* aTaskQueue,
- MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer)
: mExtraData(aConfig.mExtraData)
- , mCallback(aCallback)
, mPictureWidth(aConfig.mImage.width)
, mPictureHeight(aConfig.mImage.height)
, mDisplayWidth(aConfig.mDisplay.width)
, mDisplayHeight(aConfig.mDisplay.height)
, mTaskQueue(aTaskQueue)
, mMaxRefFrames(mp4_demuxer::H264::ComputeMaxRefFrames(aConfig.mExtraData))
, mImageContainer(aImageContainer)
- , mIsShutDown(false)
#ifdef MOZ_WIDGET_UIKIT
, mUseSoftwareImages(true)
#else
, mUseSoftwareImages(false)
#endif
, mIsFlushing(false)
- , mMonitor("AppleVideoDecoder")
+ , mMonitor("AppleVTDecoder")
, mFormat(nullptr)
, mSession(nullptr)
, mIsHardwareAccelerated(false)
{
MOZ_COUNT_CTOR(AppleVTDecoder);
// TODO: Verify aConfig.mime_type.
- LOG("Creating AppleVTDecoder for %dx%d h.264 video",
- mDisplayWidth,
- mDisplayHeight
- );
+ LOG("Creating AppleVTDecoder for %dx%d h.264 video", mDisplayWidth,
+ mDisplayHeight);
+
+ // To ensure our PromiseHolder is only ever accessed with the monitor held.
+ mPromise.SetMonitor(&mMonitor);
}
AppleVTDecoder::~AppleVTDecoder()
{
MOZ_COUNT_DTOR(AppleVTDecoder);
}
RefPtr<MediaDataDecoder::InitPromise>
@@ -69,80 +67,150 @@ AppleVTDecoder::Init()
if (NS_SUCCEEDED(rv)) {
return InitPromise::CreateAndResolve(TrackType::kVideoTrack, __func__);
}
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
-void
-AppleVTDecoder::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+AppleVTDecoder::Decode(MediaRawData* aSample)
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
-
LOG("mp4 input sample %p pts %lld duration %lld us%s %d bytes",
aSample,
aSample->mTime,
aSample->mDuration,
aSample->mKeyframe ? " keyframe" : "",
aSample->Size());
- mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
- this, &AppleVTDecoder::ProcessDecode, aSample));
+ RefPtr<AppleVTDecoder> self = this;
+ RefPtr<MediaRawData> sample = aSample;
+ return InvokeAsync(mTaskQueue, __func__, [self, this, sample] {
+ RefPtr<DecodePromise> p;
+ {
+ MonitorAutoLock mon(mMonitor);
+ p = mPromise.Ensure(__func__);
+ }
+ ProcessDecode(sample);
+ return p;
+ });
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
AppleVTDecoder::Flush()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
mIsFlushing = true;
- nsCOMPtr<nsIRunnable> runnable =
- NewRunnableMethod(this, &AppleVTDecoder::ProcessFlush);
- SyncRunnable::DispatchToThread(mTaskQueue, runnable);
- mIsFlushing = false;
+ return InvokeAsync(mTaskQueue, this, __func__, &AppleVTDecoder::ProcessFlush);
+}
+
+RefPtr<MediaDataDecoder::DecodePromise>
+AppleVTDecoder::Drain()
+{
+ return InvokeAsync(mTaskQueue, this, __func__, &AppleVTDecoder::ProcessDrain);
+}
- mSeekTargetThreshold.reset();
+RefPtr<ShutdownPromise>
+AppleVTDecoder::Shutdown()
+{
+ if (mTaskQueue) {
+ RefPtr<AppleVTDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self, this]() {
+ ProcessShutdown();
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+ });
+ }
+ ProcessShutdown();
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+}
+
+// Helper to fill in a timestamp structure.
+static CMSampleTimingInfo
+TimingInfoFromSample(MediaRawData* aSample)
+{
+ CMSampleTimingInfo timestamp;
+
+ timestamp.duration = CMTimeMake(aSample->mDuration, USECS_PER_S);
+ timestamp.presentationTimeStamp =
+ CMTimeMake(aSample->mTime, USECS_PER_S);
+ timestamp.decodeTimeStamp =
+ CMTimeMake(aSample->mTimecode, USECS_PER_S);
+
+ return timestamp;
}
void
-AppleVTDecoder::Drain()
-{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- nsCOMPtr<nsIRunnable> runnable =
- NewRunnableMethod(this, &AppleVTDecoder::ProcessDrain);
- mTaskQueue->Dispatch(runnable.forget());
-}
-
-void
-AppleVTDecoder::Shutdown()
-{
- MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
- mIsShutDown = true;
- if (mTaskQueue) {
- nsCOMPtr<nsIRunnable> runnable =
- NewRunnableMethod(this, &AppleVTDecoder::ProcessShutdown);
- mTaskQueue->Dispatch(runnable.forget());
- } else {
- ProcessShutdown();
- }
-}
-
-nsresult
AppleVTDecoder::ProcessDecode(MediaRawData* aSample)
{
AssertOnTaskQueueThread();
if (mIsFlushing) {
- return NS_OK;
+ MonitorAutoLock mon(mMonitor);
+ mPromise.Reject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+ return;
}
- auto rv = DoDecode(aSample);
+ AutoCFRelease<CMBlockBufferRef> block = nullptr;
+ AutoCFRelease<CMSampleBufferRef> sample = nullptr;
+ VTDecodeInfoFlags infoFlags;
+ OSStatus rv;
+
+ // FIXME: This copies the sample data. I think we can provide
+ // a custom block source which reuses the aSample buffer.
+ // But note that there may be a problem keeping the samples
+ // alive over multiple frames.
+ rv = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, // Struct allocator.
+ const_cast<uint8_t*>(aSample->Data()),
+ aSample->Size(),
+ kCFAllocatorNull, // Block allocator.
+ NULL, // Block source.
+ 0, // Data offset.
+ aSample->Size(),
+ false,
+ block.receive());
+ if (rv != noErr) {
+ NS_ERROR("Couldn't create CMBlockBuffer");
+ MonitorAutoLock mon(mMonitor);
+ mPromise.Reject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("CMBlockBufferCreateWithMemoryBlock:%x", rv)),
+ __func__);
+ return;
+ }
- return rv;
+ CMSampleTimingInfo timestamp = TimingInfoFromSample(aSample);
+ rv = CMSampleBufferCreate(kCFAllocatorDefault, block, true, 0, 0, mFormat, 1, 1, ×tamp, 0, NULL, sample.receive());
+ if (rv != noErr) {
+ NS_ERROR("Couldn't create CMSampleBuffer");
+ MonitorAutoLock mon(mMonitor);
+ mPromise.Reject(MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("CMSampleBufferCreate:%x", rv)),
+ __func__);
+ return;
+ }
+
+ VTDecodeFrameFlags decodeFlags =
+ kVTDecodeFrame_EnableAsynchronousDecompression;
+ rv = VTDecompressionSessionDecodeFrame(mSession,
+ sample,
+ decodeFlags,
+ CreateAppleFrameRef(aSample),
+ &infoFlags);
+ if (rv != noErr && !(infoFlags & kVTDecodeInfo_FrameDropped)) {
+ LOG("AppleVTDecoder: Error %d VTDecompressionSessionDecodeFrame", rv);
+ NS_WARNING("Couldn't pass frame to decoder");
+    // It appears that decoding sometimes still gets processed even when
+    // VTDecompressionSessionDecodeFrame returns a failure.
+ MonitorAutoLock mon(mMonitor);
+ mPromise.RejectIfExists(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("VTDecompressionSessionDecodeFrame:%x", rv)),
+ __func__);
+ return;
+ }
}
void
AppleVTDecoder::ProcessShutdown()
{
if (mSession) {
LOG("%s: cleaning up session %p", __func__, mSession);
VTDecompressionSessionInvalidate(mSession);
@@ -151,67 +219,59 @@ AppleVTDecoder::ProcessShutdown()
}
if (mFormat) {
LOG("%s: releasing format %p", __func__, mFormat);
CFRelease(mFormat);
mFormat = nullptr;
}
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
AppleVTDecoder::ProcessFlush()
{
AssertOnTaskQueueThread();
nsresult rv = WaitForAsynchronousFrames();
if (NS_FAILED(rv)) {
- LOG("AppleVTDecoder::Flush failed waiting for platform decoder "
- "with error:%d.", rv);
+ LOG("AppleVTDecoder::Flush failed waiting for platform decoder");
}
- ClearReorderedFrames();
+ MonitorAutoLock mon(mMonitor);
+ mPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+
+ while (!mReorderQueue.IsEmpty()) {
+ mReorderQueue.Pop();
+ }
+ mSeekTargetThreshold.reset();
+ mIsFlushing = false;
+ return FlushPromise::CreateAndResolve(true, __func__);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
AppleVTDecoder::ProcessDrain()
{
AssertOnTaskQueueThread();
nsresult rv = WaitForAsynchronousFrames();
if (NS_FAILED(rv)) {
- LOG("AppleVTDecoder::Drain failed waiting for platform decoder "
- "with error:%d.", rv);
+ LOG("AppleVTDecoder::Drain failed waiting for platform decoder");
}
- DrainReorderedFrames();
- mCallback->DrainComplete();
+ MonitorAutoLock mon(mMonitor);
+ DecodedData samples;
+ while (!mReorderQueue.IsEmpty()) {
+ samples.AppendElement(Move(mReorderQueue.Pop()));
+ }
+ return DecodePromise::CreateAndResolve(Move(samples), __func__);
}
AppleVTDecoder::AppleFrameRef*
AppleVTDecoder::CreateAppleFrameRef(const MediaRawData* aSample)
{
MOZ_ASSERT(aSample);
return new AppleFrameRef(*aSample);
}
void
-AppleVTDecoder::DrainReorderedFrames()
-{
- MonitorAutoLock mon(mMonitor);
- while (!mReorderQueue.IsEmpty()) {
- mCallback->Output(mReorderQueue.Pop().get());
- }
-}
-
-void
-AppleVTDecoder::ClearReorderedFrames()
-{
- MonitorAutoLock mon(mMonitor);
- while (!mReorderQueue.IsEmpty()) {
- mReorderQueue.Pop();
- }
-}
-
-void
AppleVTDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
{
LOG("SetSeekThreshold %lld", aTime.ToMicroseconds());
mSeekTargetThreshold = Some(aTime);
}
//
// Implementation details.
@@ -242,42 +302,44 @@ PlatformCallback(void* decompressionOutp
NS_WARNING("VideoToolbox decoder returned no data");
image = nullptr;
} else if (flags & kVTDecodeInfo_FrameDropped) {
NS_WARNING(" ...frame tagged as dropped...");
} else {
MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
"VideoToolbox returned an unexpected image type");
}
+
decoder->OutputFrame(image, *frameRef);
}
// Copy and return a decoded frame.
-nsresult
+void
AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
AppleVTDecoder::AppleFrameRef aFrameRef)
{
- if (mIsShutDown || mIsFlushing) {
+ if (mIsFlushing) {
// We are in the process of flushing or shutting down; ignore frame.
- return NS_OK;
+ return;
}
LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
aFrameRef.byte_offset,
aFrameRef.decode_timestamp.ToMicroseconds(),
aFrameRef.composition_timestamp.ToMicroseconds(),
aFrameRef.duration.ToMicroseconds(),
aFrameRef.is_sync_point ? " keyframe" : ""
);
if (!aImage) {
// Image was dropped by decoder or none return yet.
// We need more input to continue.
- mCallback->InputExhausted();
- return NS_OK;
+ MonitorAutoLock mon(mMonitor);
+ mPromise.Resolve(DecodedData(), __func__);
+ return;
}
bool useNullSample = false;
if (mSeekTargetThreshold.isSome()) {
if ((aFrameRef.composition_timestamp + aFrameRef.duration) < mSeekTargetThreshold.ref()) {
useNullSample = true;
} else {
mSeekTargetThreshold.reset();
@@ -305,20 +367,22 @@ AppleVTDecoder::OutputFrame(CVPixelBuffe
MOZ_ASSERT(planes == 2, "Likely not NV12 format and it must be.");
VideoData::YCbCrBuffer buffer;
// Lock the returned image data.
CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
if (rv != kCVReturnSuccess) {
NS_ERROR("error locking pixel data");
- mCallback->Error(
- MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("CVPixelBufferLockBaseAddress:%x", rv)));
- return NS_ERROR_DOM_MEDIA_DECODE_ERR;
+ MonitorAutoLock mon(mMonitor);
+ mPromise.Reject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("CVPixelBufferLockBaseAddress:%x", rv)),
+ __func__);
+ return;
}
// Y plane.
buffer.mPlanes[0].mData =
static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
buffer.mPlanes[0].mWidth = width;
buffer.mPlanes[0].mHeight = height;
buffer.mPlanes[0].mOffset = 0;
@@ -373,119 +437,46 @@ AppleVTDecoder::OutputFrame(CVPixelBuffe
visible);
#else
MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
}
if (!data) {
NS_ERROR("Couldn't create VideoData for frame");
- mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
- return NS_ERROR_OUT_OF_MEMORY;
+ MonitorAutoLock mon(mMonitor);
+ mPromise.Reject(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
+ return;
}
// Frames come out in DTS order but we need to output them
// in composition order.
MonitorAutoLock mon(mMonitor);
mReorderQueue.Push(data);
- if (mReorderQueue.Length() > mMaxRefFrames) {
- mCallback->Output(mReorderQueue.Pop().get());
+ DecodedData results;
+ while (mReorderQueue.Length() > mMaxRefFrames) {
+ results.AppendElement(mReorderQueue.Pop());
}
- mCallback->InputExhausted();
+ mPromise.Resolve(Move(results), __func__);
+
LOG("%llu decoded frames queued",
static_cast<unsigned long long>(mReorderQueue.Length()));
-
- return NS_OK;
}
nsresult
AppleVTDecoder::WaitForAsynchronousFrames()
{
OSStatus rv = VTDecompressionSessionWaitForAsynchronousFrames(mSession);
if (rv != noErr) {
- LOG("AppleVTDecoder: Error %d waiting for asynchronous frames", rv);
+ NS_ERROR("AppleVTDecoder: Error waiting for asynchronous frames");
return NS_ERROR_FAILURE;
}
return NS_OK;
}
-// Helper to fill in a timestamp structure.
-static CMSampleTimingInfo
-TimingInfoFromSample(MediaRawData* aSample)
-{
- CMSampleTimingInfo timestamp;
-
- timestamp.duration = CMTimeMake(aSample->mDuration, USECS_PER_S);
- timestamp.presentationTimeStamp =
- CMTimeMake(aSample->mTime, USECS_PER_S);
- timestamp.decodeTimeStamp =
- CMTimeMake(aSample->mTimecode, USECS_PER_S);
-
- return timestamp;
-}
-
-MediaResult
-AppleVTDecoder::DoDecode(MediaRawData* aSample)
-{
- AssertOnTaskQueueThread();
-
- // For some reason this gives me a double-free error with stagefright.
- AutoCFRelease<CMBlockBufferRef> block = nullptr;
- AutoCFRelease<CMSampleBufferRef> sample = nullptr;
- VTDecodeInfoFlags infoFlags;
- OSStatus rv;
-
- // FIXME: This copies the sample data. I think we can provide
- // a custom block source which reuses the aSample buffer.
- // But note that there may be a problem keeping the samples
- // alive over multiple frames.
- rv = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, // Struct allocator.
- const_cast<uint8_t*>(aSample->Data()),
- aSample->Size(),
- kCFAllocatorNull, // Block allocator.
- NULL, // Block source.
- 0, // Data offset.
- aSample->Size(),
- false,
- block.receive());
- if (rv != noErr) {
- NS_ERROR("Couldn't create CMBlockBuffer");
- mCallback->Error(
- MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("CMBlockBufferCreateWithMemoryBlock:%x", rv)));
- return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
- }
- CMSampleTimingInfo timestamp = TimingInfoFromSample(aSample);
- rv = CMSampleBufferCreate(kCFAllocatorDefault, block, true, 0, 0, mFormat, 1, 1, ×tamp, 0, NULL, sample.receive());
- if (rv != noErr) {
- NS_ERROR("Couldn't create CMSampleBuffer");
- mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("CMSampleBufferCreate:%x", rv)));
- return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
- }
-
- VTDecodeFrameFlags decodeFlags =
- kVTDecodeFrame_EnableAsynchronousDecompression;
- rv = VTDecompressionSessionDecodeFrame(mSession,
- sample,
- decodeFlags,
- CreateAppleFrameRef(aSample),
- &infoFlags);
- if (rv != noErr && !(infoFlags & kVTDecodeInfo_FrameDropped)) {
- LOG("AppleVTDecoder: Error %d VTDecompressionSessionDecodeFrame", rv);
- NS_WARNING("Couldn't pass frame to decoder");
- mCallback->Error(
- MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("VTDecompressionSessionDecodeFrame:%x", rv)));
- return NS_ERROR_DOM_MEDIA_DECODE_ERR;
- }
-
- return NS_OK;
-}
-
nsresult
AppleVTDecoder::InitializeSession()
{
OSStatus rv;
AutoCFRelease<CFDictionaryRef> extensions = CreateDecoderExtensions();
rv = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
@@ -665,10 +656,9 @@ AppleVTDecoder::CreateOutputConfiguratio
ArrayLength(outputKeys),
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks);
#else
MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
}
-
} // namespace mozilla
--- a/dom/media/platforms/apple/AppleVTDecoder.h
+++ b/dom/media/platforms/apple/AppleVTDecoder.h
@@ -16,17 +16,16 @@
#include "VideoToolbox/VideoToolbox.h"
namespace mozilla {
class AppleVTDecoder : public MediaDataDecoder {
public:
AppleVTDecoder(const VideoInfo& aConfig,
TaskQueue* aTaskQueue,
- MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer);
class AppleFrameRef {
public:
media::TimeUnit decode_timestamp;
media::TimeUnit composition_timestamp;
media::TimeUnit duration;
int64_t byte_offset;
@@ -38,84 +37,79 @@ public:
, duration(media::TimeUnit::FromMicroseconds(aSample.mDuration))
, byte_offset(aSample.mOffset)
, is_sync_point(aSample.mKeyframe)
{
}
};
RefPtr<InitPromise> Init() override;
- void Input(MediaRawData* aSample) override;
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
void SetSeekThreshold(const media::TimeUnit& aTime) override;
bool IsHardwareAccelerated(nsACString& aFailureReason) const override
{
return mIsHardwareAccelerated;
}
const char* GetDescriptionName() const override
{
return mIsHardwareAccelerated
- ? "apple hardware VT decoder"
- : "apple software VT decoder";
+ ? "apple hardware VT decoder"
+ : "apple software VT decoder";
}
// Access from the taskqueue and the decoder's thread.
// OutputFrame is thread-safe.
- nsresult OutputFrame(CVPixelBufferRef aImage,
- AppleFrameRef aFrameRef);
+ void OutputFrame(CVPixelBufferRef aImage, AppleFrameRef aFrameRef);
private:
virtual ~AppleVTDecoder();
- void ProcessFlush();
- void ProcessDrain();
+ RefPtr<FlushPromise> ProcessFlush();
+ RefPtr<DecodePromise> ProcessDrain();
void ProcessShutdown();
- nsresult ProcessDecode(MediaRawData* aSample);
+ void ProcessDecode(MediaRawData* aSample);
void AssertOnTaskQueueThread()
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
}
AppleFrameRef* CreateAppleFrameRef(const MediaRawData* aSample);
- void DrainReorderedFrames();
- void ClearReorderedFrames();
CFDictionaryRef CreateOutputConfiguration();
const RefPtr<MediaByteBuffer> mExtraData;
- MediaDataDecoderCallback* mCallback;
const uint32_t mPictureWidth;
const uint32_t mPictureHeight;
const uint32_t mDisplayWidth;
const uint32_t mDisplayHeight;
// Method to set up the decompression session.
nsresult InitializeSession();
nsresult WaitForAsynchronousFrames();
CFDictionaryRef CreateDecoderSpecification();
CFDictionaryRef CreateDecoderExtensions();
- // Method to pass a frame to VideoToolbox for decoding.
- MediaResult DoDecode(MediaRawData* aSample);
const RefPtr<TaskQueue> mTaskQueue;
const uint32_t mMaxRefFrames;
const RefPtr<layers::ImageContainer> mImageContainer;
- Atomic<bool> mIsShutDown;
const bool mUseSoftwareImages;
// Set on reader/decode thread calling Flush() to indicate that output is
// not required and so input samples on mTaskQueue need not be processed.
// Cleared on mTaskQueue in ProcessDrain().
Atomic<bool> mIsFlushing;
- // Protects mReorderQueue.
+ // Protects mReorderQueue and mPromise.
Monitor mMonitor;
ReorderQueue mReorderQueue;
+ MozPromiseHolder<DecodePromise> mPromise;
+
// Decoded frame will be dropped if its pts is smaller than this
// value. It shold be initialized before Input() or after Flush(). So it is
// safe to access it in OutputFrame without protecting.
Maybe<media::TimeUnit> mSeekTargetThreshold;
CMVideoFormatDescriptionRef mFormat;
VTDecompressionSessionRef mSession;
Atomic<bool> mIsHardwareAccelerated;
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -10,19 +10,18 @@
#include "TimeUnits.h"
#define MAX_CHANNELS 16
namespace mozilla
{
FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(FFmpegLibWrapper* aLib,
- TaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
- const AudioInfo& aConfig)
- : FFmpegDataDecoder(aLib, aTaskQueue, aCallback, GetCodecId(aConfig.mMimeType))
+ TaskQueue* aTaskQueue, const AudioInfo& aConfig)
+ : FFmpegDataDecoder(aLib, aTaskQueue, GetCodecId(aConfig.mMimeType))
{
MOZ_COUNT_CTOR(FFmpegAudioDecoder);
// Use a new MediaByteBuffer as the object will be modified during initialization.
if (aConfig.mCodecSpecificConfig && aConfig.mCodecSpecificConfig->Length()) {
mExtraData = new MediaByteBuffer;
mExtraData->AppendElements(*aConfig.mCodecSpecificConfig);
}
}
@@ -112,108 +111,118 @@ CopyAndPackAudio(AVFrame* aFrame, uint32
*tmp++ = AudioSampleToFloat(data[channel][frame]);
}
}
}
return audio;
}
-MediaResult
-FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+FFmpegAudioDecoder<LIBAV_VER>::ProcessDecode(MediaRawData* aSample)
{
AVPacket packet;
mLib->av_init_packet(&packet);
packet.data = const_cast<uint8_t*>(aSample->Data());
packet.size = aSample->Size();
if (!PrepareFrame()) {
- return MediaResult(
- NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame"));
+ return DecodePromise::CreateAndReject(
+ MediaResult(
+ NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame")),
+ __func__);
}
int64_t samplePosition = aSample->mOffset;
media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);
+ DecodedData results;
while (packet.size > 0) {
int decoded;
int bytesConsumed =
mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);
if (bytesConsumed < 0) {
NS_WARNING("FFmpeg audio decoder error.");
- return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("FFmpeg audio error:%d", bytesConsumed));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("FFmpeg audio error:%d", bytesConsumed)),
+ __func__);
}
if (mFrame->format != AV_SAMPLE_FMT_FLT &&
mFrame->format != AV_SAMPLE_FMT_FLTP &&
mFrame->format != AV_SAMPLE_FMT_S16 &&
mFrame->format != AV_SAMPLE_FMT_S16P &&
mFrame->format != AV_SAMPLE_FMT_S32 &&
mFrame->format != AV_SAMPLE_FMT_S32P) {
- return MediaResult(
- NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("FFmpeg audio decoder outputs unsupported audio format"));
+ return DecodePromise::CreateAndReject(
+ MediaResult(
+ NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("FFmpeg audio decoder outputs unsupported audio format")),
+ __func__);
}
if (decoded) {
uint32_t numChannels = mCodecContext->channels;
AudioConfig::ChannelLayout layout(numChannels);
if (!layout.IsValid()) {
- return MediaResult(
- NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Unsupported channel layout:%u", numChannels));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Unsupported channel layout:%u", numChannels)),
+ __func__);
}
uint32_t samplingRate = mCodecContext->sample_rate;
AlignedAudioBuffer audio =
CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
if (!audio) {
- return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
}
media::TimeUnit duration =
FramesToTimeUnit(mFrame->nb_samples, samplingRate);
if (!duration.IsValid()) {
- return MediaResult(
- NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
- RESULT_DETAIL("Invalid sample duration"));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Invalid sample duration")),
+ __func__);
}
- RefPtr<AudioData> data = new AudioData(samplePosition,
- pts.ToMicroseconds(),
- duration.ToMicroseconds(),
- mFrame->nb_samples,
- Move(audio),
- numChannels,
- samplingRate);
- mCallback->Output(data);
- pts += duration;
- if (!pts.IsValid()) {
- return MediaResult(
- NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
- RESULT_DETAIL("Invalid count of accumulated audio samples"));
+ media::TimeUnit newpts = pts + duration;
+ if (!newpts.IsValid()) {
+ return DecodePromise::CreateAndReject(
+ MediaResult(
+ NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Invalid count of accumulated audio samples")),
+ __func__);
}
+
+ results.AppendElement(new AudioData(
+ samplePosition, pts.ToMicroseconds(), duration.ToMicroseconds(),
+ mFrame->nb_samples, Move(audio), numChannels, samplingRate));
+
+ pts = newpts;
}
packet.data += bytesConsumed;
packet.size -= bytesConsumed;
samplePosition += bytesConsumed;
}
- return NS_OK;
+ return DecodePromise::CreateAndResolve(Move(results), __func__);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
FFmpegAudioDecoder<LIBAV_VER>::ProcessDrain()
{
ProcessFlush();
- mCallback->DrainComplete();
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
AVCodecID
FFmpegAudioDecoder<LIBAV_VER>::GetCodecId(const nsACString& aMimeType)
{
if (aMimeType.EqualsLiteral("audio/mpeg")) {
return AV_CODEC_ID_MP3;
} else if (aMimeType.EqualsLiteral("audio/flac")) {
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h
@@ -17,28 +17,27 @@ template <int V> class FFmpegAudioDecode
{
};
template <>
class FFmpegAudioDecoder<LIBAV_VER> : public FFmpegDataDecoder<LIBAV_VER>
{
public:
FFmpegAudioDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
- MediaDataDecoderCallback* aCallback,
const AudioInfo& aConfig);
virtual ~FFmpegAudioDecoder();
RefPtr<InitPromise> Init() override;
void InitCodecContext() override;
static AVCodecID GetCodecId(const nsACString& aMimeType);
const char* GetDescriptionName() const override
{
return "ffmpeg audio decoder";
}
private:
- MediaResult DoDecode(MediaRawData* aSample) override;
- void ProcessDrain() override;
+ RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> ProcessDrain() override;
};
} // namespace mozilla
#endif // __FFmpegAACDecoder_h__
--- a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
@@ -16,28 +16,25 @@
#include "FFmpegDataDecoder.h"
#include "prsystem.h"
namespace mozilla
{
StaticMutex FFmpegDataDecoder<LIBAV_VER>::sMonitor;
- FFmpegDataDecoder<LIBAV_VER>::FFmpegDataDecoder(FFmpegLibWrapper* aLib,
- TaskQueue* aTaskQueue,
- MediaDataDecoderCallback* aCallback,
- AVCodecID aCodecID)
+FFmpegDataDecoder<LIBAV_VER>::FFmpegDataDecoder(FFmpegLibWrapper* aLib,
+ TaskQueue* aTaskQueue,
+ AVCodecID aCodecID)
: mLib(aLib)
- , mCallback(aCallback)
, mCodecContext(nullptr)
, mFrame(NULL)
, mExtraData(nullptr)
, mCodecID(aCodecID)
, mTaskQueue(aTaskQueue)
- , mIsFlushing(false)
{
MOZ_ASSERT(aLib);
MOZ_COUNT_CTOR(FFmpegDataDecoder);
}
FFmpegDataDecoder<LIBAV_VER>::~FFmpegDataDecoder()
{
MOZ_COUNT_DTOR(FFmpegDataDecoder);
@@ -85,77 +82,59 @@ FFmpegDataDecoder<LIBAV_VER>::InitDecode
mLib->av_freep(&mCodecContext);
return NS_ERROR_FAILURE;
}
FFMPEG_LOG("FFmpeg init successful.");
return NS_OK;
}
-void
+RefPtr<ShutdownPromise>
FFmpegDataDecoder<LIBAV_VER>::Shutdown()
{
if (mTaskQueue) {
- nsCOMPtr<nsIRunnable> runnable =
- NewRunnableMethod(this, &FFmpegDataDecoder<LIBAV_VER>::ProcessShutdown);
- mTaskQueue->Dispatch(runnable.forget());
- } else {
- ProcessShutdown();
+ RefPtr<FFmpegDataDecoder<LIBAV_VER>> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self, this]() {
+ ProcessShutdown();
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+ });
}
-}
-
-void
-FFmpegDataDecoder<LIBAV_VER>::ProcessDecode(MediaRawData* aSample)
-{
- MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
- if (mIsFlushing) {
- return;
- }
- MediaResult rv = DoDecode(aSample);
- if (NS_FAILED(rv)) {
- mCallback->Error(rv);
- } else {
- mCallback->InputExhausted();
- }
+ ProcessShutdown();
+ return ShutdownPromise::CreateAndResolve(true, __func__);
}
-void
-FFmpegDataDecoder<LIBAV_VER>::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+FFmpegDataDecoder<LIBAV_VER>::Decode(MediaRawData* aSample)
{
- mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
- this, &FFmpegDataDecoder::ProcessDecode, aSample));
+ return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
+ &FFmpegDataDecoder::ProcessDecode, aSample);
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
FFmpegDataDecoder<LIBAV_VER>::Flush()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- mIsFlushing = true;
- nsCOMPtr<nsIRunnable> runnable =
- NewRunnableMethod(this, &FFmpegDataDecoder<LIBAV_VER>::ProcessFlush);
- SyncRunnable::DispatchToThread(mTaskQueue, runnable);
- mIsFlushing = false;
+ return InvokeAsync(mTaskQueue, this, __func__,
+ &FFmpegDataDecoder<LIBAV_VER>::ProcessFlush);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
FFmpegDataDecoder<LIBAV_VER>::Drain()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
- nsCOMPtr<nsIRunnable> runnable =
- NewRunnableMethod(this, &FFmpegDataDecoder<LIBAV_VER>::ProcessDrain);
- mTaskQueue->Dispatch(runnable.forget());
+ return InvokeAsync(mTaskQueue, this, __func__,
+ &FFmpegDataDecoder<LIBAV_VER>::ProcessDrain);
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
FFmpegDataDecoder<LIBAV_VER>::ProcessFlush()
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
if (mCodecContext) {
mLib->avcodec_flush_buffers(mCodecContext);
}
+ return FlushPromise::CreateAndResolve(true, __func__);
}
void
FFmpegDataDecoder<LIBAV_VER>::ProcessShutdown()
{
StaticMutexAutoLock mon(sMonitor);
if (mCodecContext) {
--- a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.h
@@ -20,53 +20,48 @@ class FFmpegDataDecoder : public MediaDa
{
};
template <>
class FFmpegDataDecoder<LIBAV_VER> : public MediaDataDecoder
{
public:
FFmpegDataDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
- MediaDataDecoderCallback* aCallback,
AVCodecID aCodecID);
virtual ~FFmpegDataDecoder();
static bool Link();
RefPtr<InitPromise> Init() override = 0;
- void Input(MediaRawData* aSample) override;
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
static AVCodec* FindAVCodec(FFmpegLibWrapper* aLib, AVCodecID aCodec);
protected:
// Flush and Drain operation, always run
- virtual void ProcessFlush();
+ virtual RefPtr<FlushPromise> ProcessFlush();
virtual void ProcessShutdown();
virtual void InitCodecContext() {}
AVFrame* PrepareFrame();
nsresult InitDecoder();
FFmpegLibWrapper* mLib;
- MediaDataDecoderCallback* mCallback;
AVCodecContext* mCodecContext;
AVFrame* mFrame;
RefPtr<MediaByteBuffer> mExtraData;
AVCodecID mCodecID;
private:
- void ProcessDecode(MediaRawData* aSample);
- virtual MediaResult DoDecode(MediaRawData* aSample) = 0;
- virtual void ProcessDrain() = 0;
+ virtual RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample) = 0;
+ virtual RefPtr<DecodePromise> ProcessDrain() = 0;
static StaticMutex sMonitor;
const RefPtr<TaskQueue> mTaskQueue;
- // Set/cleared on reader thread calling Flush() to indicate that output is
- // not required and so input samples on mTaskQueue need not be processed.
- Atomic<bool> mIsFlushing;
+ MozPromiseHolder<DecodePromise> mPromise;
};
} // namespace mozilla
#endif // __FFmpegDataDecoder_h__
--- a/dom/media/platforms/ffmpeg/FFmpegDecoderModule.h
+++ b/dom/media/platforms/ffmpeg/FFmpegDecoderModule.h
@@ -38,29 +38,27 @@ public:
// the check for alpha to PDMFactory but not itself remove the need for a
// check.
if (aParams.VideoConfig().HasAlpha()) {
return nullptr;
}
RefPtr<MediaDataDecoder> decoder =
new FFmpegVideoDecoder<V>(mLib,
aParams.mTaskQueue,
- aParams.mCallback,
aParams.VideoConfig(),
aParams.mImageContainer);
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const CreateDecoderParams& aParams) override
{
RefPtr<MediaDataDecoder> decoder =
new FFmpegAudioDecoder<V>(mLib,
aParams.mTaskQueue,
- aParams.mCallback,
aParams.AudioConfig());
return decoder.forget();
}
bool SupportsMimeType(const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const override
{
AVCodecID videoCodec = FFmpegVideoDecoder<V>::GetCodecId(aMimeType);
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -97,20 +97,18 @@ FFmpegVideoDecoder<LIBAV_VER>::PtsCorrec
{
mNumFaultyPts = 0;
mNumFaultyDts = 0;
mLastPts = INT64_MIN;
mLastDts = INT64_MIN;
}
FFmpegVideoDecoder<LIBAV_VER>::FFmpegVideoDecoder(FFmpegLibWrapper* aLib,
- TaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
- const VideoInfo& aConfig,
- ImageContainer* aImageContainer)
- : FFmpegDataDecoder(aLib, aTaskQueue, aCallback, GetCodecId(aConfig.mMimeType))
+ TaskQueue* aTaskQueue, const VideoInfo& aConfig, ImageContainer* aImageContainer)
+ : FFmpegDataDecoder(aLib, aTaskQueue, GetCodecId(aConfig.mMimeType))
, mImageContainer(aImageContainer)
, mInfo(aConfig)
, mCodecParser(nullptr)
, mLastInputDts(INT64_MIN)
{
MOZ_COUNT_CTOR(FFmpegVideoDecoder);
// Use a new MediaByteBuffer as the object will be modified during initialization.
mExtraData = new MediaByteBuffer;
@@ -156,25 +154,31 @@ FFmpegVideoDecoder<LIBAV_VER>::InitCodec
mCodecContext->get_format = ChoosePixelFormat;
mCodecParser = mLib->av_parser_init(mCodecID);
if (mCodecParser) {
mCodecParser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
}
}
-MediaResult
-FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+FFmpegVideoDecoder<LIBAV_VER>::ProcessDecode(MediaRawData* aSample)
{
bool gotFrame = false;
- return DoDecode(aSample, &gotFrame);
+ DecodedData results;
+ MediaResult rv = DoDecode(aSample, &gotFrame, results);
+ if (NS_FAILED(rv)) {
+ return DecodePromise::CreateAndReject(rv, __func__);
+ }
+ return DecodePromise::CreateAndResolve(Move(results), __func__);
}
MediaResult
-FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame)
+FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame,
+ MediaDataDecoder::DecodedData& aResults)
{
uint8_t* inputData = const_cast<uint8_t*>(aSample->Data());
size_t inputSize = aSample->Size();
#if LIBAVCODEC_VERSION_MAJOR >= 54
if (inputSize && mCodecParser && (mCodecID == AV_CODEC_ID_VP8
#if LIBAVCODEC_VERSION_MAJOR >= 55
|| mCodecID == AV_CODEC_ID_VP9
@@ -189,35 +193,36 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
aSample->mOffset);
if (size_t(len) > inputSize) {
return NS_ERROR_DOM_MEDIA_DECODE_ERR;
}
inputData += len;
inputSize -= len;
if (size) {
bool gotFrame = false;
- MediaResult rv = DoDecode(aSample, data, size, &gotFrame);
+ MediaResult rv = DoDecode(aSample, data, size, &gotFrame, aResults);
if (NS_FAILED(rv)) {
return rv;
}
if (gotFrame && aGotFrame) {
*aGotFrame = true;
}
}
}
return NS_OK;
}
#endif
- return DoDecode(aSample, inputData, inputSize, aGotFrame);
+ return DoDecode(aSample, inputData, inputSize, aGotFrame, aResults);
}
MediaResult
FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
uint8_t* aData, int aSize,
- bool* aGotFrame)
+ bool* aGotFrame,
+ MediaDataDecoder::DecodedData& aResults)
{
AVPacket packet;
mLib->av_init_packet(&packet);
packet.data = aData;
packet.size = aSize;
packet.dts = mLastInputDts = aSample->mTimecode;
packet.pts = aSample->mTime;
@@ -332,39 +337,41 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
-1,
mInfo.ScaledImageRect(mFrame->width,
mFrame->height));
if (!v) {
return MediaResult(NS_ERROR_OUT_OF_MEMORY,
RESULT_DETAIL("image allocation error"));
}
- mCallback->Output(v);
+ aResults.AppendElement(Move(v));
if (aGotFrame) {
*aGotFrame = true;
}
return NS_OK;
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
FFmpegVideoDecoder<LIBAV_VER>::ProcessDrain()
{
RefPtr<MediaRawData> empty(new MediaRawData());
empty->mTimecode = mLastInputDts;
bool gotFrame = false;
- while (NS_SUCCEEDED(DoDecode(empty, &gotFrame)) && gotFrame);
- mCallback->DrainComplete();
+ DecodedData results;
+ while (NS_SUCCEEDED(DoDecode(empty, &gotFrame, results)) && gotFrame) {
+ }
+ return DecodePromise::CreateAndResolve(Move(results), __func__);
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
FFmpegVideoDecoder<LIBAV_VER>::ProcessFlush()
{
mPtsContext.Reset();
mDurationMap.Clear();
- FFmpegDataDecoder::ProcessFlush();
+ return FFmpegDataDecoder::ProcessFlush();
}
FFmpegVideoDecoder<LIBAV_VER>::~FFmpegVideoDecoder()
{
MOZ_COUNT_DTOR(FFmpegVideoDecoder);
if (mCodecParser) {
mLib->av_parser_close(mCodecParser);
mCodecParser = nullptr;
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
@@ -23,17 +23,16 @@ class FFmpegVideoDecoder : public FFmpeg
template <>
class FFmpegVideoDecoder<LIBAV_VER> : public FFmpegDataDecoder<LIBAV_VER>
{
typedef mozilla::layers::Image Image;
typedef mozilla::layers::ImageContainer ImageContainer;
public:
FFmpegVideoDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
- MediaDataDecoderCallback* aCallback,
const VideoInfo& aConfig,
ImageContainer* aImageContainer);
virtual ~FFmpegVideoDecoder();
RefPtr<InitPromise> Init() override;
void InitCodecContext() override;
const char* GetDescriptionName() const override
{
@@ -41,21 +40,21 @@ public:
return "ffvpx video decoder";
#else
return "ffmpeg video decoder";
#endif
}
static AVCodecID GetCodecId(const nsACString& aMimeType);
private:
- MediaResult DoDecode(MediaRawData* aSample) override;
- MediaResult DoDecode(MediaRawData* aSample, bool* aGotFrame);
- MediaResult DoDecode(MediaRawData* aSample, uint8_t* aData, int aSize, bool* aGotFrame);
- void ProcessDrain() override;
- void ProcessFlush() override;
+ RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> ProcessDrain() override;
+ RefPtr<FlushPromise> ProcessFlush() override;
+ MediaResult DoDecode(MediaRawData* aSample, bool* aGotFrame, DecodedData& aResults);
+ MediaResult DoDecode(MediaRawData* aSample, uint8_t* aData, int aSize, bool* aGotFrame, DecodedData& aResults);
void OutputDelayedFrames();
/**
* This method allocates a buffer for FFmpeg's decoder, wrapped in an Image.
* Currently it only supports Planar YUV420, which appears to be the only
* non-hardware accelerated image format that FFmpeg's H264 decoder is
* capable of outputting.
*/
--- a/dom/media/platforms/omx/GonkOmxPlatformLayer.cpp
+++ b/dom/media/platforms/omx/GonkOmxPlatformLayer.cpp
@@ -129,17 +129,18 @@ public:
mPromiseLayer = nullptr;
mClient = nullptr;
}
GonkOmxObserver(TaskQueue* aTaskQueue, OmxPromiseLayer* aPromiseLayer, OmxDataDecoder* aDataDecoder)
: mTaskQueue(aTaskQueue)
, mPromiseLayer(aPromiseLayer)
, mClient(aDataDecoder)
- {}
+ {
+ }
protected:
RefPtr<TaskQueue> mTaskQueue;
// TODO:
// we should combine both event handlers into one. And we should provide
// an unified way for event handling in OmxPlatformLayer class.
RefPtr<OmxPromiseLayer> mPromiseLayer;
RefPtr<OmxDataDecoder> mClient;
--- a/dom/media/platforms/omx/OmxDataDecoder.cpp
+++ b/dom/media/platforms/omx/OmxDataDecoder.cpp
@@ -91,34 +91,29 @@ protected:
AudioCompactor mAudioCompactor;
// video output
RefPtr<layers::ImageContainer> mImageContainer;
};
OmxDataDecoder::OmxDataDecoder(const TrackInfo& aTrackInfo,
- MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer)
- : mMonitor("OmxDataDecoder")
- , mOmxTaskQueue(CreateMediaDecodeTaskQueue())
+ : mOmxTaskQueue(CreateMediaDecodeTaskQueue())
, mImageContainer(aImageContainer)
, mWatchManager(this, mOmxTaskQueue)
, mOmxState(OMX_STATETYPE::OMX_StateInvalid, "OmxDataDecoder::mOmxState")
, mTrackInfo(aTrackInfo.Clone())
, mFlushing(false)
, mShuttingDown(false)
, mCheckingInputExhausted(false)
, mPortSettingsChanged(-1, "OmxDataDecoder::mPortSettingsChanged")
- , mCallback(aCallback)
{
LOG("");
mOmxLayer = new OmxPromiseLayer(mOmxTaskQueue, this, aImageContainer);
-
- mOmxTaskQueue->Dispatch(NewRunnableMethod(this, &OmxDataDecoder::InitializationTask));
}
OmxDataDecoder::~OmxDataDecoder()
{
LOG("");
}
void
@@ -129,129 +124,109 @@ OmxDataDecoder::InitializationTask()
}
void
OmxDataDecoder::EndOfStream()
{
LOG("");
MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
- mFlushing = true;
RefPtr<OmxDataDecoder> self = this;
mOmxLayer->SendCommand(OMX_CommandFlush, OMX_ALL, nullptr)
- ->Then(mReaderTaskQueue, __func__,
- [self] () {
- self->mFlushing = false;
- self->mCallback->DrainComplete();
+ ->Then(mOmxTaskQueue, __func__,
+ [self, this] () {
+ mDrainPromise.ResolveIfExists(mDecodedData, __func__);
+ mDecodedData.Clear();
},
- [self] () {
- self->mFlushing = false;
- self->mCallback->DrainComplete();
+ [self, this] () {
+ mDrainPromise.ResolveIfExists(mDecodedData, __func__);
+ mDecodedData.Clear();
});
}
RefPtr<MediaDataDecoder::InitPromise>
OmxDataDecoder::Init()
{
LOG("");
- mReaderTaskQueue = AbstractThread::GetCurrent()->AsTaskQueue();
- MOZ_ASSERT(mReaderTaskQueue);
- RefPtr<InitPromise> p = mInitPromise.Ensure(__func__);
RefPtr<OmxDataDecoder> self = this;
+ return InvokeAsync(mOmxTaskQueue, __func__, [self, this]() {
+ InitializationTask();
- // TODO: it needs to get permission from resource manager before allocating
- // Omx component.
- InvokeAsync<const TrackInfo*>(mOmxTaskQueue, mOmxLayer.get(), __func__,
- &OmxPromiseLayer::Init, mTrackInfo.get())
- ->Then(mOmxTaskQueue, __func__,
- [self] () {
- // Omx state should be OMX_StateIdle.
- self->mOmxState = self->mOmxLayer->GetState();
- MOZ_ASSERT(self->mOmxState != OMX_StateIdle);
- },
- [self] () {
- self->RejectInitPromise(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
- });
-
- return p;
+ RefPtr<InitPromise> p = mInitPromise.Ensure(__func__);
+ mOmxLayer->Init(mTrackInfo.get())
+ ->Then(mOmxTaskQueue, __func__,
+ [self, this]() {
+ // Omx state should be OMX_StateIdle.
+ mOmxState = mOmxLayer->GetState();
+ MOZ_ASSERT(mOmxState != OMX_StateIdle);
+ },
+ [self, this]() {
+ RejectInitPromise(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ });
+ return p;
+ });
}
-void
-OmxDataDecoder::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+OmxDataDecoder::Decode(MediaRawData* aSample)
{
LOG("sample %p", aSample);
MOZ_ASSERT(mInitPromise.IsEmpty());
RefPtr<OmxDataDecoder> self = this;
RefPtr<MediaRawData> sample = aSample;
-
- nsCOMPtr<nsIRunnable> r =
- NS_NewRunnableFunction([self, sample] () {
- self->mMediaRawDatas.AppendElement(sample);
+ return InvokeAsync(mOmxTaskQueue, __func__, [self, this, sample]() {
+ RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
+ mMediaRawDatas.AppendElement(Move(sample));
- // Start to fill/empty buffers.
- if (self->mOmxState == OMX_StateIdle ||
- self->mOmxState == OMX_StateExecuting) {
- self->FillAndEmptyBuffers();
- }
- });
- mOmxTaskQueue->Dispatch(r.forget());
+ // Start to fill/empty buffers.
+ if (mOmxState == OMX_StateIdle ||
+ mOmxState == OMX_StateExecuting) {
+ FillAndEmptyBuffers();
+ }
+ return p;
+ });
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
OmxDataDecoder::Flush()
{
LOG("");
mFlushing = true;
- mOmxTaskQueue->Dispatch(NewRunnableMethod(this, &OmxDataDecoder::DoFlush));
-
- // According to the definition of Flush() in PDM:
- // "the decoder must be ready to accept new input for decoding".
- // So it needs to wait for the Omx to complete the flush command.
- MonitorAutoLock lock(mMonitor);
- while (mFlushing) {
- lock.Wait();
- }
+ return InvokeAsync(mOmxTaskQueue, this, __func__, &OmxDataDecoder::DoFlush);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
OmxDataDecoder::Drain()
{
LOG("");
- mOmxTaskQueue->Dispatch(NewRunnableMethod(this, &OmxDataDecoder::SendEosBuffer));
+ RefPtr<OmxDataDecoder> self = this;
+ return InvokeAsync(mOmxTaskQueue, __func__, [self, this]() {
+ RefPtr<DecodePromise> p = mDrainPromise.Ensure(__func__);
+ SendEosBuffer();
+ return p;
+ });
}
-void
+RefPtr<ShutdownPromise>
OmxDataDecoder::Shutdown()
{
LOG("");
mShuttingDown = true;
- mOmxTaskQueue->Dispatch(NewRunnableMethod(this, &OmxDataDecoder::DoAsyncShutdown));
-
- {
- // DoAsyncShutdown() will be running for a while, it could be still running
- // when reader releasing the decoder and then it causes problem. To avoid it,
- // Shutdown() must block until DoAsyncShutdown() is completed.
- MonitorAutoLock lock(mMonitor);
- while (mShuttingDown) {
- lock.Wait();
- }
- }
-
- mOmxTaskQueue->BeginShutdown();
- mOmxTaskQueue->AwaitShutdownAndIdle();
+ return InvokeAsync(mOmxTaskQueue, this, __func__,
+ &OmxDataDecoder::DoAsyncShutdown);
}
-void
+RefPtr<ShutdownPromise>
OmxDataDecoder::DoAsyncShutdown()
{
LOG("");
MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(!mFlushing);
mWatchManager.Unwatch(mOmxState, &OmxDataDecoder::OmxStateRunner);
mWatchManager.Unwatch(mPortSettingsChanged, &OmxDataDecoder::PortSettingsChanged);
@@ -292,30 +267,33 @@ OmxDataDecoder::DoAsyncShutdown()
->Then(mOmxTaskQueue, __func__,
[self] () {
LOGL("DoAsyncShutdown: OMX_StateLoaded, it is safe to shutdown omx");
self->mOmxLayer->Shutdown();
self->mWatchManager.Shutdown();
self->mOmxLayer = nullptr;
self->mMediaDataHelper = nullptr;
- MonitorAutoLock lock(self->mMonitor);
self->mShuttingDown = false;
- self->mMonitor.Notify();
+ self->mOmxTaskQueue->BeginShutdown();
+ self->mOmxTaskQueue->AwaitShutdownAndIdle();
+ self->mShutdownPromise.Resolve(true, __func__);
},
[self] () {
self->mOmxLayer->Shutdown();
self->mWatchManager.Shutdown();
self->mOmxLayer = nullptr;
self->mMediaDataHelper = nullptr;
- MonitorAutoLock lock(self->mMonitor);
self->mShuttingDown = false;
- self->mMonitor.Notify();
+ self->mOmxTaskQueue->BeginShutdown();
+ self->mOmxTaskQueue->AwaitShutdownAndIdle();
+ self->mShutdownPromise.Resolve(true, __func__);
});
+ return mShutdownPromise.Ensure(__func__);
}
void
OmxDataDecoder::FillBufferDone(BufferData* aData)
{
MOZ_ASSERT(!aData || aData->mStatus == BufferData::BufferStatus::OMX_CLIENT);
// Don't output sample when flush or shutting down, especially for video
@@ -373,70 +351,71 @@ OmxDataDecoder::Output(BufferData* aData
[buffer] () {
MOZ_RELEASE_ASSERT(buffer->mStatus == BufferData::BufferStatus::OMX_CLIENT_OUTPUT);
buffer->mStatus = BufferData::BufferStatus::FREE;
});
} else {
aData->mStatus = BufferData::BufferStatus::FREE;
}
- mCallback->Output(data);
+ mDecodedData.AppendElement(Move(data));
}
void
OmxDataDecoder::FillBufferFailure(OmxBufferFailureHolder aFailureHolder)
{
NotifyError(aFailureHolder.mError, __func__);
}
void
OmxDataDecoder::EmptyBufferDone(BufferData* aData)
{
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(!aData || aData->mStatus == BufferData::BufferStatus::OMX_CLIENT);
// Nothing to do when status of input buffer is OMX_CLIENT.
aData->mStatus = BufferData::BufferStatus::FREE;
FillAndEmptyBuffers();
// There is no way to know if component gets enough raw samples to generate
// output, especially for video decoding. So here it needs to request raw
// samples aggressively.
if (!mCheckingInputExhausted && !mMediaRawDatas.Length()) {
mCheckingInputExhausted = true;
RefPtr<OmxDataDecoder> self = this;
- nsCOMPtr<nsIRunnable> r =
- NS_NewRunnableFunction([self] () {
- MOZ_ASSERT(self->mOmxTaskQueue->IsCurrentThreadIn());
-
- self->mCheckingInputExhausted = false;
+ nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self, this]() {
+ mCheckingInputExhausted = false;
- if (self->mMediaRawDatas.Length()) {
- return;
- }
+ if (mMediaRawDatas.Length()) {
+ return;
+ }
- LOGL("Call InputExhausted()");
- self->mCallback->InputExhausted();
- });
+ mDecodePromise.ResolveIfExists(mDecodedData, __func__);
+ mDecodedData.Clear();
+ });
mOmxTaskQueue->Dispatch(r.forget());
}
}
void
OmxDataDecoder::EmptyBufferFailure(OmxBufferFailureHolder aFailureHolder)
{
NotifyError(aFailureHolder.mError, __func__);
}
void
OmxDataDecoder::NotifyError(OMX_ERRORTYPE aOmxError, const char* aLine, const MediaResult& aError)
{
LOG("NotifyError %d (%d) at %s", aOmxError, aError.Code(), aLine);
- mCallback->Error(aError);
+ mDecodedData.Clear();
+ mDecodePromise.RejectIfExists(aError, __func__);
+ mDrainPromise.RejectIfExists(aError, __func__);
+ mFlushPromise.RejectIfExists(aError, __func__);
}
void
OmxDataDecoder::FillAndEmptyBuffers()
{
MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(mOmxState == OMX_StateExecuting);
@@ -533,36 +512,26 @@ OmxDataDecoder::GetBuffers(OMX_DIRTYPE a
return &mInPortBuffers;
}
return &mOutPortBuffers;
}
void
OmxDataDecoder::ResolveInitPromise(const char* aMethodName)
{
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
LOG("called from %s", aMethodName);
- RefPtr<OmxDataDecoder> self = this;
- nsCOMPtr<nsIRunnable> r =
- NS_NewRunnableFunction([self, aMethodName] () {
- MOZ_ASSERT(self->mReaderTaskQueue->IsCurrentThreadIn());
- self->mInitPromise.ResolveIfExists(self->mTrackInfo->GetType(), aMethodName);
- });
- mReaderTaskQueue->Dispatch(r.forget());
+ mInitPromise.ResolveIfExists(mTrackInfo->GetType(), aMethodName);
}
void
OmxDataDecoder::RejectInitPromise(MediaResult aError, const char* aMethodName)
{
- RefPtr<OmxDataDecoder> self = this;
- nsCOMPtr<nsIRunnable> r =
- NS_NewRunnableFunction([self, aError, aMethodName] () {
- MOZ_ASSERT(self->mReaderTaskQueue->IsCurrentThreadIn());
- self->mInitPromise.RejectIfExists(aError, aMethodName);
- });
- mReaderTaskQueue->Dispatch(r.forget());
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+ mInitPromise.RejectIfExists(aError, aMethodName);
}
void
OmxDataDecoder::OmxStateRunner()
{
MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
LOG("OMX state: %s", StateTypeToStr(mOmxState));
@@ -857,47 +826,51 @@ OmxDataDecoder::SendEosBuffer()
// with EOS flag. However, MediaRawData doesn't provide EOS information,
// so here it generates an empty BufferData with eos OMX_BUFFERFLAG_EOS in queue.
// This behaviour should be compliant with spec, I think...
RefPtr<MediaRawData> eos_data = new MediaRawData();
mMediaRawDatas.AppendElement(eos_data);
FillAndEmptyBuffers();
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
OmxDataDecoder::DoFlush()
{
MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+ mDecodedData.Clear();
+ mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+ mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+
+ RefPtr<FlushPromise> p = mFlushPromise.Ensure(__func__);
+
// 1. Call OMX command OMX_CommandFlush in Omx TaskQueue.
// 2. Remove all elements in mMediaRawDatas when flush is completed.
mOmxLayer->SendCommand(OMX_CommandFlush, OMX_ALL, nullptr)
->Then(mOmxTaskQueue, __func__, this,
&OmxDataDecoder::FlushComplete,
&OmxDataDecoder::FlushFailure);
+
+ return p;
}
void
OmxDataDecoder::FlushComplete(OMX_COMMANDTYPE aCommandType)
{
mMediaRawDatas.Clear();
mFlushing = false;
- MonitorAutoLock lock(mMonitor);
- mMonitor.Notify();
LOG("Flush complete");
+ mFlushPromise.ResolveIfExists(true, __func__);
}
void OmxDataDecoder::FlushFailure(OmxCommandFailureHolder aFailureHolder)
{
- NotifyError(OMX_ErrorUndefined, __func__);
mFlushing = false;
-
- MonitorAutoLock lock(mMonitor);
- mMonitor.Notify();
+ mFlushPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
MediaDataHelper::MediaDataHelper(const TrackInfo* aTrackInfo,
layers::ImageContainer* aImageContainer,
OmxPromiseLayer* aOmxLayer)
: mTrackInfo(aTrackInfo)
, mAudioCompactor(mAudioQueue)
, mImageContainer(aImageContainer)
--- a/dom/media/platforms/omx/OmxDataDecoder.h
+++ b/dom/media/platforms/omx/OmxDataDecoder.h
@@ -56,28 +56,23 @@ typedef OmxPromiseLayer::BUFFERLIST BUFF
* OmxPlatformLayer acts as the OpenMAX IL core.
*/
class OmxDataDecoder : public MediaDataDecoder {
protected:
virtual ~OmxDataDecoder();
public:
OmxDataDecoder(const TrackInfo& aTrackInfo,
- MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer);
RefPtr<InitPromise> Init() override;
-
- void Input(MediaRawData* aSample) override;
-
- void Flush() override;
-
- void Drain() override;
-
- void Shutdown() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "omx decoder";
}
// Return true if event is handled.
bool Event(OMX_EVENTTYPE aEvent, OMX_U32 aData1, OMX_U32 aData2);
@@ -126,19 +121,19 @@ protected:
void Output(BufferData* aData);
// Buffer can be released if its status is not OMX_COMPONENT or
// OMX_CLIENT_OUTPUT.
bool BuffersCanBeReleased(OMX_DIRTYPE aType);
OMX_DIRTYPE GetPortDirection(uint32_t aPortIndex);
- void DoAsyncShutdown();
+ RefPtr<ShutdownPromise> DoAsyncShutdown();
- void DoFlush();
+ RefPtr<FlushPromise> DoFlush();
void FlushComplete(OMX_COMMANDTYPE aCommandType);
void FlushFailure(OmxCommandFailureHolder aFailureHolder);
BUFFERLIST* GetBuffers(OMX_DIRTYPE aType);
nsresult AllocateBuffers(OMX_DIRTYPE aType);
@@ -146,64 +141,66 @@ protected:
nsresult ReleaseBuffers(OMX_DIRTYPE aType);
BufferData* FindAvailableBuffer(OMX_DIRTYPE aType);
// aType could be OMX_DirMax for all types.
RefPtr<OmxPromiseLayer::OmxBufferPromise::AllPromiseType>
CollectBufferPromises(OMX_DIRTYPE aType);
- Monitor mMonitor;
-
// The Omx TaskQueue.
RefPtr<TaskQueue> mOmxTaskQueue;
- RefPtr<TaskQueue> mReaderTaskQueue;
-
RefPtr<layers::ImageContainer> mImageContainer;
WatchManager<OmxDataDecoder> mWatchManager;
// It is accessed in omx TaskQueue.
Watchable<OMX_STATETYPE> mOmxState;
RefPtr<OmxPromiseLayer> mOmxLayer;
UniquePtr<TrackInfo> mTrackInfo;
// It is accessed in both omx and reader TaskQueue.
Atomic<bool> mFlushing;
- // It is accessed in Omx/reader TaskQeueu.
+ // It is accessed in Omx/reader TaskQueue.
Atomic<bool> mShuttingDown;
// It is accessed in Omx TaskQeueu.
bool mCheckingInputExhausted;
- // It is accessed in reader TaskQueue.
+ // It is accessed in OMX TaskQueue.
MozPromiseHolder<InitPromise> mInitPromise;
+ MozPromiseHolder<DecodePromise> mDecodePromise;
+ MozPromiseHolder<DecodePromise> mDrainPromise;
+ MozPromiseHolder<FlushPromise> mFlushPromise;
+ MozPromiseHolder<ShutdownPromise> mShutdownPromise;
+ // Where decoded samples will be stored until the decode promise is resolved.
+ DecodedData mDecodedData;
- // It is written in Omx TaskQeueu. Read in Omx TaskQueue.
+ void CompleteDrain();
+
+ // It is written in Omx TaskQueue. Read in Omx TaskQueue.
// It value means the port index which port settings is changed.
// -1 means no port setting changed.
//
// Note: when port setting changed, there should be no buffer operations
// via EmptyBuffer or FillBuffer.
Watchable<int32_t> mPortSettingsChanged;
// It is access in Omx TaskQueue.
nsTArray<RefPtr<MediaRawData>> mMediaRawDatas;
BUFFERLIST mInPortBuffers;
BUFFERLIST mOutPortBuffers;
RefPtr<MediaDataHelper> mMediaDataHelper;
-
- MediaDataDecoderCallback* mCallback;
};
template<class T>
void InitOmxParameter(T* aParam)
{
PodZero(aParam);
aParam->nSize = sizeof(T);
aParam->nVersion.s.nVersionMajor = 1;
--- a/dom/media/platforms/omx/OmxDecoderModule.cpp
+++ b/dom/media/platforms/omx/OmxDecoderModule.cpp
@@ -10,26 +10,24 @@
#include "OmxPlatformLayer.h"
namespace mozilla {
already_AddRefed<MediaDataDecoder>
OmxDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
{
RefPtr<OmxDataDecoder> decoder = new OmxDataDecoder(aParams.mConfig,
- aParams.mCallback,
aParams.mImageContainer);
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
OmxDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
{
RefPtr<OmxDataDecoder> decoder = new OmxDataDecoder(aParams.mConfig,
- aParams.mCallback,
nullptr);
return decoder.forget();
}
PlatformDecoderModule::ConversionRequired
OmxDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
{
return ConversionRequired::kNeedNone;
--- a/dom/media/platforms/wmf/WMFAudioMFTManager.h
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.h
@@ -11,58 +11,59 @@
#include "MFTDecoder.h"
#include "mozilla/RefPtr.h"
#include "WMFMediaDataDecoder.h"
extern const GUID CLSID_WebmMfVpxDec;
namespace mozilla {
-class WMFAudioMFTManager : public MFTManager {
+class WMFAudioMFTManager : public MFTManager
+{
public:
explicit WMFAudioMFTManager(const AudioInfo& aConfig);
~WMFAudioMFTManager();
bool Init();
HRESULT Input(MediaRawData* aSample) override;
// Note WMF's AAC decoder sometimes output negatively timestamped samples,
// presumably they're the preroll samples, and we strip them. We may return
// a null aOutput in this case.
- HRESULT Output(int64_t aStreamOffset,
- RefPtr<MediaData>& aOutput) override;
+ HRESULT Output(int64_t aStreamOffset, RefPtr<MediaData>& aOutput) override;
void Shutdown() override;
- TrackInfo::TrackType GetType() override {
+ TrackInfo::TrackType GetType() override
+ {
return TrackInfo::kAudioTrack;
}
const char* GetDescriptionName() const override
{
return "wmf audio decoder";
}
private:
-
HRESULT UpdateOutputType();
uint32_t mAudioChannels;
uint32_t mAudioRate;
nsTArray<BYTE> mUserData;
// The offset, at which playback started since the
// last discontinuity.
media::TimeUnit mAudioTimeOffset;
// The number of audio frames that we've played since the last
// discontinuity.
int64_t mAudioFrameSum;
- enum StreamType {
+ enum StreamType
+ {
Unknown,
AAC,
MP3
};
StreamType mStreamType;
const GUID& GetMFTGUID();
const GUID& GetMediaSubtypeGUID();
--- a/dom/media/platforms/wmf/WMFDecoderModule.cpp
+++ b/dom/media/platforms/wmf/WMFDecoderModule.cpp
@@ -90,32 +90,32 @@ WMFDecoderModule::CreateVideoDecoder(con
aParams.mImageContainer,
sDXVAEnabled));
if (!manager->Init()) {
return nullptr;
}
RefPtr<MediaDataDecoder> decoder =
- new WMFMediaDataDecoder(manager.forget(), aParams.mTaskQueue, aParams.mCallback);
+ new WMFMediaDataDecoder(manager.forget(), aParams.mTaskQueue);
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
WMFDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
{
nsAutoPtr<WMFAudioMFTManager> manager(new WMFAudioMFTManager(aParams.AudioConfig()));
if (!manager->Init()) {
return nullptr;
}
RefPtr<MediaDataDecoder> decoder =
- new WMFMediaDataDecoder(manager.forget(), aParams.mTaskQueue, aParams.mCallback);
+ new WMFMediaDataDecoder(manager.forget(), aParams.mTaskQueue);
return decoder.forget();
}
static bool
CanCreateMFTDecoder(const GUID& aGuid)
{
if (FAILED(wmf::MFStartup())) {
return false;
--- a/dom/media/platforms/wmf/WMFDecoderModule.h
+++ b/dom/media/platforms/wmf/WMFDecoderModule.h
@@ -6,17 +6,18 @@
#if !defined(WMFPlatformDecoderModule_h_)
#define WMFPlatformDecoderModule_h_
#include "PlatformDecoderModule.h"
namespace mozilla {
-class WMFDecoderModule : public PlatformDecoderModule {
+class WMFDecoderModule : public PlatformDecoderModule
+{
public:
WMFDecoderModule();
virtual ~WMFDecoderModule();
// Initializes the module, loads required dynamic libraries, etc.
nsresult Startup() override;
already_AddRefed<MediaDataDecoder>
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
@@ -13,22 +13,19 @@
#include "mozilla/Logging.h"
#include "mozilla/SyncRunnable.h"
#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
namespace mozilla {
WMFMediaDataDecoder::WMFMediaDataDecoder(MFTManager* aMFTManager,
- TaskQueue* aTaskQueue,
- MediaDataDecoderCallback* aCallback)
+ TaskQueue* aTaskQueue)
: mTaskQueue(aTaskQueue)
- , mCallback(aCallback)
, mMFTManager(aMFTManager)
- , mIsFlushing(false)
, mIsShutDown(false)
{
}
WMFMediaDataDecoder::~WMFMediaDataDecoder()
{
}
@@ -67,162 +64,170 @@ SendTelemetry(unsigned long hr)
nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction(
[sample] {
Telemetry::Accumulate(Telemetry::MEDIA_WMF_DECODE_ERROR, sample);
});
NS_DispatchToMainThread(runnable);
}
-void
+RefPtr<ShutdownPromise>
WMFMediaDataDecoder::Shutdown()
{
MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
+ mIsShutDown = true;
+
if (mTaskQueue) {
- mTaskQueue->Dispatch(NewRunnableMethod(this, &WMFMediaDataDecoder::ProcessShutdown));
- } else {
- ProcessShutdown();
+ return InvokeAsync(mTaskQueue, this, __func__,
+ &WMFMediaDataDecoder::ProcessShutdown);
}
- mIsShutDown = true;
+ return ProcessShutdown();
}
-void
+RefPtr<ShutdownPromise>
WMFMediaDataDecoder::ProcessShutdown()
{
if (mMFTManager) {
mMFTManager->Shutdown();
mMFTManager = nullptr;
if (!mRecordedError && mHasSuccessfulOutput) {
SendTelemetry(S_OK);
}
}
+ return ShutdownPromise::CreateAndResolve(true, __func__);
}
// Inserts data into the decoder's pipeline.
-void
-WMFMediaDataDecoder::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+WMFMediaDataDecoder::Decode(MediaRawData* aSample)
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
- nsCOMPtr<nsIRunnable> runnable =
- NewRunnableMethod<RefPtr<MediaRawData>>(
- this,
- &WMFMediaDataDecoder::ProcessDecode,
- RefPtr<MediaRawData>(aSample));
- mTaskQueue->Dispatch(runnable.forget());
+ return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
+ &WMFMediaDataDecoder::ProcessDecode,
+ aSample);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
WMFMediaDataDecoder::ProcessDecode(MediaRawData* aSample)
{
- if (mIsFlushing) {
- // Skip sample, to be released by runnable.
- return;
- }
-
HRESULT hr = mMFTManager->Input(aSample);
if (hr == MF_E_NOTACCEPTING) {
ProcessOutput();
hr = mMFTManager->Input(aSample);
}
if (FAILED(hr)) {
NS_WARNING("MFTManager rejected sample");
- mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("MFTManager::Input:%x", hr)));
if (!mRecordedError) {
SendTelemetry(hr);
mRecordedError = true;
}
- return;
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("MFTManager::Input:%x", hr)),
+ __func__);
}
mLastStreamOffset = aSample->mOffset;
+ RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
ProcessOutput();
+ return p;
}
void
WMFMediaDataDecoder::ProcessOutput()
{
RefPtr<MediaData> output;
HRESULT hr = S_OK;
+ DecodedData results;
while (SUCCEEDED(hr = mMFTManager->Output(mLastStreamOffset, output)) &&
output) {
mHasSuccessfulOutput = true;
- mCallback->Output(output);
+ results.AppendElement(Move(output));
}
if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
- mCallback->InputExhausted();
- } else if (FAILED(hr)) {
+ if (!mDecodePromise.IsEmpty()) {
+ mDecodePromise.ResolveIfExists(Move(results), __func__);
+ } else {
+ mDrainPromise.ResolveIfExists(Move(results), __func__);
+ }
+ return;
+ }
+ if (FAILED(hr)) {
NS_WARNING("WMFMediaDataDecoder failed to output data");
- mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
- RESULT_DETAIL("MFTManager::Output:%x", hr)));
+ const auto error = MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("MFTManager::Output:%x", hr));
+ if (!mDecodePromise.IsEmpty()) {
+ mDecodePromise.RejectIfExists(error, __func__);
+ }
+ else {
+ mDrainPromise.RejectIfExists(error, __func__);
+ }
+
if (!mRecordedError) {
SendTelemetry(hr);
mRecordedError = true;
}
}
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
WMFMediaDataDecoder::ProcessFlush()
{
if (mMFTManager) {
mMFTManager->Flush();
}
+ return FlushPromise::CreateAndResolve(true, __func__);
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
WMFMediaDataDecoder::Flush()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
- mIsFlushing = true;
- nsCOMPtr<nsIRunnable> runnable =
- NewRunnableMethod(this, &WMFMediaDataDecoder::ProcessFlush);
- SyncRunnable::DispatchToThread(mTaskQueue, runnable);
- mIsFlushing = false;
+ return InvokeAsync(mTaskQueue, this, __func__,
+ &WMFMediaDataDecoder::ProcessFlush);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
WMFMediaDataDecoder::ProcessDrain()
{
- if (!mIsFlushing && mMFTManager) {
- // Order the decoder to drain...
- mMFTManager->Drain();
- // Then extract all available output.
- ProcessOutput();
+ if (!mMFTManager) {
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
- mCallback->DrainComplete();
+ // Order the decoder to drain...
+ mMFTManager->Drain();
+ // Then extract all available output.
+ RefPtr<DecodePromise> p = mDrainPromise.Ensure(__func__);
+ ProcessOutput();
+ return p;
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
WMFMediaDataDecoder::Drain()
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
- mTaskQueue->Dispatch(NewRunnableMethod(this, &WMFMediaDataDecoder::ProcessDrain));
+ return InvokeAsync(mTaskQueue, this, __func__,
+ &WMFMediaDataDecoder::ProcessDrain);
}
bool
WMFMediaDataDecoder::IsHardwareAccelerated(nsACString& aFailureReason) const {
MOZ_ASSERT(!mIsShutDown);
return mMFTManager && mMFTManager->IsHardwareAccelerated(aFailureReason);
}
void
WMFMediaDataDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
{
- MOZ_ASSERT(mCallback->OnReaderTaskQueue());
MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
RefPtr<WMFMediaDataDecoder> self = this;
nsCOMPtr<nsIRunnable> runnable =
NS_NewRunnableFunction([self, aTime]() {
media::TimeUnit threshold = aTime;
self->mMFTManager->SetSeekThreshold(threshold);
});
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.h
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.h
@@ -14,17 +14,18 @@
#include "nsAutoPtr.h"
#include "PlatformDecoderModule.h"
namespace mozilla {
// Encapsulates the initialization of the MFTDecoder appropriate for decoding
// a given stream, and the process of converting the IMFSample produced
// by the MFT into a MediaData object.
-class MFTManager {
+class MFTManager
+{
public:
virtual ~MFTManager() {}
// Submit a compressed sample for decoding.
// This should forward to the MFTDecoder after performing
// any required sample formatting.
virtual HRESULT Input(MediaRawData* aSample) = 0;
@@ -54,93 +55,91 @@ public:
virtual void Shutdown() = 0;
virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const { return false; }
virtual TrackInfo::TrackType GetType() = 0;
virtual const char* GetDescriptionName() const = 0;
- virtual void SetSeekThreshold(const media::TimeUnit& aTime) {
+ virtual void SetSeekThreshold(const media::TimeUnit& aTime)
+ {
mSeekTargetThreshold = Some(aTime);
}
protected:
// IMFTransform wrapper that performs the decoding.
RefPtr<MFTDecoder> mDecoder;
Maybe<media::TimeUnit> mSeekTargetThreshold;
};
// Decodes audio and video using Windows Media Foundation. Samples are decoded
// using the MFTDecoder created by the MFTManager. This class implements
// the higher-level logic that drives mapping the MFT to the async
// MediaDataDecoder interface. The specifics of decoding the exact stream
// type are handled by MFTManager and the MFTDecoder it creates.
-class WMFMediaDataDecoder : public MediaDataDecoder {
+class WMFMediaDataDecoder : public MediaDataDecoder
+{
public:
WMFMediaDataDecoder(MFTManager* aOutputSource,
- TaskQueue* aTaskQueue,
- MediaDataDecoderCallback* aCallback);
+ TaskQueue* aTaskQueue);
~WMFMediaDataDecoder();
RefPtr<MediaDataDecoder::InitPromise> Init() override;
- void Input(MediaRawData* aSample);
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
- void Flush() override;
+ RefPtr<DecodePromise> Drain() override;
- void Drain() override;
+ RefPtr<FlushPromise> Flush() override;
- void Shutdown() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
const char* GetDescriptionName() const override
{
return mMFTManager ? mMFTManager->GetDescriptionName() : "";
}
virtual void SetSeekThreshold(const media::TimeUnit& aTime) override;
private:
// Called on the task queue. Inserts the sample into the decoder, and
// extracts output if available.
- void ProcessDecode(MediaRawData* aSample);
+ RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
// Called on the task queue. Extracts output if available, and delivers
// it to the reader. Called after ProcessDecode() and ProcessDrain().
void ProcessOutput();
// Called on the task queue. Orders the MFT to flush. There is no output to
// extract.
- void ProcessFlush();
+ RefPtr<FlushPromise> ProcessFlush();
// Called on the task queue. Orders the MFT to drain, and then extracts
// all available output.
- void ProcessDrain();
+ RefPtr<DecodePromise> ProcessDrain();
- void ProcessShutdown();
+ RefPtr<ShutdownPromise> ProcessShutdown();
const RefPtr<TaskQueue> mTaskQueue;
- MediaDataDecoderCallback* mCallback;
nsAutoPtr<MFTManager> mMFTManager;
// The last offset into the media resource that was passed into Input().
// This is used to approximate the decoder's position in the media resource.
int64_t mLastStreamOffset;
- // Set on reader/decode thread calling Flush() to indicate that output is
- // not required and so input samples on mTaskQueue need not be processed.
- // Cleared on mTaskQueue.
- Atomic<bool> mIsFlushing;
+ bool mIsShutDown;
- bool mIsShutDown;
+ MozPromiseHolder<DecodePromise> mDecodePromise;
+ MozPromiseHolder<DecodePromise> mDrainPromise;
// For telemetry
bool mHasSuccessfulOutput = false;
bool mRecordedError = false;
};
} // namespace mozilla
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.h
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.h
@@ -38,18 +38,18 @@ public:
TrackInfo::TrackType GetType() override {
return TrackInfo::kVideoTrack;
}
const char* GetDescriptionName() const override
{
nsCString failureReason;
- return IsHardwareAccelerated(failureReason)
- ? "wmf hardware video decoder" : "wmf software video decoder";
+ return IsHardwareAccelerated(failureReason) ? "wmf hardware video decoder"
+ : "wmf software video decoder";
}
void Flush() override
{
MFTManager::Flush();
mDraining = false;
mSamplesCount = 0;
}
--- a/dom/media/platforms/wrappers/H264Converter.cpp
+++ b/dom/media/platforms/wrappers/H264Converter.cpp
@@ -18,17 +18,16 @@ namespace mozilla
H264Converter::H264Converter(PlatformDecoderModule* aPDM,
const CreateDecoderParams& aParams)
: mPDM(aPDM)
, mCurrentConfig(aParams.VideoConfig())
, mKnowsCompositor(aParams.mKnowsCompositor)
, mImageContainer(aParams.mImageContainer)
, mTaskQueue(aParams.mTaskQueue)
- , mCallback(aParams.mCallback)
, mDecoder(nullptr)
, mGMPCrashHelper(aParams.mCrashHelper)
, mNeedAVCC(aPDM->DecoderNeedsConversion(aParams.mConfig)
== PlatformDecoderModule::ConversionRequired::kNeedAVCC)
, mLastError(NS_OK)
{
CreateDecoder(aParams.mDiagnostics);
}
@@ -44,105 +43,108 @@ H264Converter::Init()
return mDecoder->Init();
}
// We haven't been able to initialize a decoder due to a missing SPS/PPS.
return MediaDataDecoder::InitPromise::CreateAndResolve(
TrackType::kVideoTrack, __func__);
}
-void
-H264Converter::Input(MediaRawData* aSample)
+RefPtr<MediaDataDecoder::DecodePromise>
+H264Converter::Decode(MediaRawData* aSample)
{
- MOZ_RELEASE_ASSERT(!mInitPromiseRequest.Exists(),
- "Still processing previous sample");
+ MOZ_RELEASE_ASSERT(!mDecodePromiseRequest.Exists() &&
+ !mInitPromiseRequest.Exists(),
+ "Can't request a new decode until previous one completed");
if (!mp4_demuxer::AnnexB::ConvertSampleToAVCC(aSample)) {
// We need AVCC content to be able to later parse the SPS.
// This is a no-op if the data is already AVCC.
- mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("ConvertSampleToAVCC")));
- return;
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY, RESULT_DETAIL("ConvertSampleToAVCC")),
+ __func__);
}
nsresult rv;
if (!mDecoder) {
// It is not possible to create an AVCC H264 decoder without SPS.
// As such, creation will fail if the extra_data just extracted doesn't
// contain a SPS.
rv = CreateDecoderAndInit(aSample);
if (rv == NS_ERROR_NOT_INITIALIZED) {
// We are missing the required SPS to create the decoder.
// Ignore for the time being, the MediaRawData will be dropped.
- mCallback->InputExhausted();
- return;
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
} else {
rv = CheckForSPSChange(aSample);
}
if (rv == NS_ERROR_DOM_MEDIA_INITIALIZING_DECODER) {
// The decoder is pending initialization.
- return;
+ RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
+ return p;
}
if (NS_FAILED(rv)) {
- mCallback->Error(
+ return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Unable to create H264 decoder")));
- return;
+ RESULT_DETAIL("Unable to create H264 decoder")),
+ __func__);
}
if (mNeedKeyframe && !aSample->mKeyframe) {
- mCallback->InputExhausted();
- return;
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
if (!mNeedAVCC &&
!mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)) {
- mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("ConvertSampleToAnnexB")));
- return;
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("ConvertSampleToAnnexB")),
+ __func__);
}
mNeedKeyframe = false;
aSample->mExtraData = mCurrentConfig.mExtraData;
- mDecoder->Input(aSample);
+ return mDecoder->Decode(aSample);
}
-void
+RefPtr<MediaDataDecoder::FlushPromise>
H264Converter::Flush()
{
mNeedKeyframe = true;
if (mDecoder) {
- mDecoder->Flush();
+ return mDecoder->Flush();
}
+ return FlushPromise::CreateAndResolve(true, __func__);
}
-void
+RefPtr<MediaDataDecoder::DecodePromise>
H264Converter::Drain()
{
mNeedKeyframe = true;
if (mDecoder) {
- mDecoder->Drain();
- return;
+ return mDecoder->Drain();
}
- mCallback->DrainComplete();
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
-void
+RefPtr<ShutdownPromise>
H264Converter::Shutdown()
{
if (mDecoder) {
- mDecoder->Shutdown();
mInitPromiseRequest.DisconnectIfExists();
- mDecoder = nullptr;
+ mDecodePromiseRequest.DisconnectIfExists();
+ RefPtr<MediaDataDecoder> decoder = mDecoder.forget();
+ return decoder->Shutdown();
}
+ return ShutdownPromise::CreateAndResolve(true, __func__);
}
bool
H264Converter::IsHardwareAccelerated(nsACString& aFailureReason) const
{
if (mDecoder) {
return mDecoder->IsHardwareAccelerated(aFailureReason);
}
@@ -184,17 +186,16 @@ H264Converter::CreateDecoder(DecoderDoct
// SPS was invalid.
mLastError = NS_ERROR_FAILURE;
return NS_ERROR_FAILURE;
}
mDecoder = mPDM->CreateVideoDecoder({
mCurrentConfig,
mTaskQueue,
- mCallback,
aDiagnostics,
mImageContainer,
mKnowsCompositor,
mGMPCrashHelper
});
if (!mDecoder) {
mLastError = NS_ERROR_FAILURE;
@@ -233,36 +234,49 @@ H264Converter::CreateDecoderAndInit(Medi
}
void
H264Converter::OnDecoderInitDone(const TrackType aTrackType)
{
mInitPromiseRequest.Complete();
RefPtr<MediaRawData> sample = mPendingSample.forget();
if (mNeedKeyframe && !sample->mKeyframe) {
- mCallback->InputExhausted();
- return;
+ mDecodePromise.ResolveIfExists(DecodedData(), __func__); return;
}
mNeedKeyframe = false;
if (!mNeedAVCC &&
!mp4_demuxer::AnnexB::ConvertSampleToAnnexB(sample, mNeedKeyframe)) {
- mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("ConvertSampleToAnnexB")));
+ mDecodePromise.RejectIfExists(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("ConvertSampleToAnnexB")),
+ __func__);
return;
}
- mDecoder->Input(sample);
+ RefPtr<H264Converter> self = this;
+ mDecoder->Decode(sample)
+ ->Then(AbstractThread::GetCurrent()->AsTaskQueue(), __func__,
+ [self, this](const MediaDataDecoder::DecodedData& aResults) {
+ mDecodePromiseRequest.Complete();
+ mDecodePromise.ResolveIfExists(aResults, __func__);
+ },
+ [self, this](const MediaResult& aError) {
+ mDecodePromiseRequest.Complete();
+ mDecodePromise.RejectIfExists(aError, __func__);
+ })
+ ->Track(mDecodePromiseRequest);
}
void
H264Converter::OnDecoderInitFailed(const MediaResult& aError)
{
mInitPromiseRequest.Complete();
- mCallback->Error(
+ mDecodePromise.RejectIfExists(
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Unable to initialize H264 decoder")));
+ RESULT_DETAIL("Unable to initialize H264 decoder")),
+ __func__);
}
nsresult
H264Converter::CheckForSPSChange(MediaRawData* aSample)
{
RefPtr<MediaByteBuffer> extra_data =
mp4_demuxer::AnnexB::ExtractExtraData(aSample);
if (!mp4_demuxer::AnnexB::HasSPS(extra_data) ||
--- a/dom/media/platforms/wrappers/H264Converter.h
+++ b/dom/media/platforms/wrappers/H264Converter.h
@@ -13,28 +13,29 @@ namespace mozilla {
// H264Converter is a MediaDataDecoder wrapper used to ensure that
// only AVCC or AnnexB is fed to the underlying MediaDataDecoder.
// The H264Converter allows playback of content where the SPS NAL may not be
// provided in the init segment (e.g. AVC3 or Annex B)
// H264Converter will monitor the input data, and will delay creation of the
// MediaDataDecoder until a SPS and PPS NALs have been extracted.
-class H264Converter : public MediaDataDecoder {
+class H264Converter : public MediaDataDecoder
+{
public:
H264Converter(PlatformDecoderModule* aPDM,
const CreateDecoderParams& aParams);
virtual ~H264Converter();
RefPtr<InitPromise> Init() override;
- void Input(MediaRawData* aSample) override;
- void Flush() override;
- void Drain() override;
- void Shutdown() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
const char* GetDescriptionName() const override
{
if (mDecoder) {
return mDecoder->GetDescriptionName();
}
return "H264Converter decoder (pending)";
}
@@ -62,19 +63,20 @@ private:
void OnDecoderInitFailed(const MediaResult& aError);
RefPtr<PlatformDecoderModule> mPDM;
VideoInfo mCurrentConfig;
RefPtr<layers::KnowsCompositor> mKnowsCompositor;
RefPtr<layers::ImageContainer> mImageContainer;
const RefPtr<TaskQueue> mTaskQueue;
RefPtr<MediaRawData> mPendingSample;
- MediaDataDecoderCallback* mCallback;
RefPtr<MediaDataDecoder> mDecoder;
MozPromiseRequestHolder<InitPromise> mInitPromiseRequest;
+ MozPromiseRequestHolder<DecodePromise> mDecodePromiseRequest;
+ MozPromiseHolder<DecodePromise> mDecodePromise;
RefPtr<GMPCrashHelper> mGMPCrashHelper;
bool mNeedAVCC;
nsresult mLastError;
bool mNeedKeyframe = true;
};
} // namespace mozilla