--- a/dom/media/platforms/PDMFactory.cpp
+++ b/dom/media/platforms/PDMFactory.cpp
@@ -97,17 +97,17 @@ PDMFactory::EnsureInit() const
NS_NewRunnableFunction([]() { ClearOnShutdown(&sInstance); });
NS_DispatchToMainThread(runnable);
}
}
}
already_AddRefed<MediaDataDecoder>
PDMFactory::CreateDecoder(const TrackInfo& aConfig,
- FlushableTaskQueue* aTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer) const
{
bool isEncrypted = mEMEPDM && aConfig.mCrypto.mValid;
if (isEncrypted) {
@@ -152,17 +152,17 @@ PDMFactory::CreateDecoder(const TrackInf
}
NS_WARNING("Unable to create a decoder, no platform found.");
return nullptr;
}
already_AddRefed<MediaDataDecoder>
PDMFactory::CreateDecoderWithPDM(PlatformDecoderModule* aPDM,
const TrackInfo& aConfig,
- FlushableTaskQueue* aTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer) const
{
MOZ_ASSERT(aPDM);
RefPtr<MediaDataDecoder> m;
--- a/dom/media/platforms/PDMFactory.h
+++ b/dom/media/platforms/PDMFactory.h
@@ -26,17 +26,17 @@ public:
// Factory method that creates the appropriate PlatformDecoderModule for
// the platform we're running on. Caller is responsible for deleting this
// instance. It's expected that there will be multiple
// PlatformDecoderModules alive at the same time.
// This is called on the decode task queue.
already_AddRefed<MediaDataDecoder>
CreateDecoder(const TrackInfo& aConfig,
- FlushableTaskQueue* aTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics,
layers::LayersBackend aLayersBackend = layers::LayersBackend::LAYERS_NONE,
layers::ImageContainer* aImageContainer = nullptr) const;
bool SupportsMimeType(const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const;
@@ -57,17 +57,17 @@ private:
// Returns the first PDM in our list supporting the mimetype.
already_AddRefed<PlatformDecoderModule>
GetDecoder(const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const;
already_AddRefed<MediaDataDecoder>
CreateDecoderWithPDM(PlatformDecoderModule* aPDM,
const TrackInfo& aConfig,
- FlushableTaskQueue* aTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer) const;
nsTArray<RefPtr<PlatformDecoderModule>> mCurrentPDMs;
RefPtr<PlatformDecoderModule> mEMEPDM;
--- a/dom/media/platforms/PlatformDecoderModule.h
+++ b/dom/media/platforms/PlatformDecoderModule.h
@@ -2,17 +2,16 @@
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined(PlatformDecoderModule_h_)
#define PlatformDecoderModule_h_
-#include "FlushableTaskQueue.h"
#include "MediaDecoderReader.h"
#include "mozilla/MozPromise.h"
#include "mozilla/layers/LayersTypes.h"
#include "nsTArray.h"
#include "mozilla/RefPtr.h"
#include <queue>
namespace mozilla {
@@ -23,17 +22,17 @@ class MediaRawData;
class DecoderDoctorDiagnostics;
namespace layers {
class ImageContainer;
} // namespace layers
class MediaDataDecoder;
class MediaDataDecoderCallback;
-class FlushableTaskQueue;
+class TaskQueue;
class CDMProxy;
// The PlatformDecoderModule interface is used by the MediaFormatReader to
// abstract access to decoders provided by various
// platforms.
// Each platform (Windows, MacOSX, Linux, B2G etc) must implement a
// PlatformDecoderModule to provide access to its decoders in order to get
// decompressed H.264/AAC from the MediaFormatReader.
@@ -85,33 +84,33 @@ protected:
// COINIT_MULTITHREADED.
// Returns nullptr if the decoder can't be created.
// It is safe to store a reference to aConfig.
// This is called on the decode task queue.
virtual already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) = 0;
// Creates an Audio decoder with the specified properties.
// Asynchronous decoding of audio should be done in runnables dispatched to
-  // aAudioTaskQueue. If the task queue isn't needed, the decoder should
+  // aTaskQueue. If the task queue isn't needed, the decoder should
// not hold a reference to it.
// Output and errors should be returned to the reader via aCallback.
// Returns nullptr if the decoder can't be created.
// On Windows the task queue's threads in have MSCOM initialized with
// COINIT_MULTITHREADED.
// It is safe to store a reference to aConfig.
// This is called on the decode task queue.
virtual already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) = 0;
};
// A callback used by MediaDataDecoder to return output/errors to the
// MediaFormatReader.
// Implementation is threadsafe, and can be called on any thread.
class MediaDataDecoderCallback {
--- a/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
@@ -21,47 +21,47 @@ AgnosticDecoderModule::SupportsMimeType(
VorbisDataDecoder::IsVorbis(aMimeType) ||
WaveDataDecoder::IsWave(aMimeType);
}
already_AddRefed<MediaDataDecoder>
AgnosticDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
RefPtr<MediaDataDecoder> m;
if (VPXDecoder::IsVPX(aConfig.mMimeType)) {
m = new VPXDecoder(*aConfig.GetAsVideoInfo(),
aImageContainer,
- aVideoTaskQueue,
+ aTaskQueue,
aCallback);
}
return m.forget();
}
already_AddRefed<MediaDataDecoder>
AgnosticDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
RefPtr<MediaDataDecoder> m;
if (VorbisDataDecoder::IsVorbis(aConfig.mMimeType)) {
m = new VorbisDataDecoder(*aConfig.GetAsAudioInfo(),
- aAudioTaskQueue,
+ aTaskQueue,
aCallback);
} else if (OpusDataDecoder::IsOpus(aConfig.mMimeType)) {
m = new OpusDataDecoder(*aConfig.GetAsAudioInfo(),
- aAudioTaskQueue,
+ aTaskQueue,
aCallback);
} else if (WaveDataDecoder::IsWave(aConfig.mMimeType)) {
m = new WaveDataDecoder(*aConfig.GetAsAudioInfo(), aCallback);
}
return m.forget();
}
--- a/dom/media/platforms/agnostic/AgnosticDecoderModule.h
+++ b/dom/media/platforms/agnostic/AgnosticDecoderModule.h
@@ -20,23 +20,23 @@ public:
}
protected:
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
};
} // namespace mozilla
#endif /* AgnosticDecoderModule_h_ */
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -196,32 +196,32 @@ private:
class BlankDecoderModule : public PlatformDecoderModule {
public:
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override {
BlankVideoDataCreator* creator = new BlankVideoDataCreator(
aConfig.mDisplay.width, aConfig.mDisplay.height, aImageContainer);
RefPtr<MediaDataDecoder> decoder =
new BlankMediaDataDecoder<BlankVideoDataCreator>(creator,
aCallback,
TrackInfo::kVideoTrack);
return decoder.forget();
}
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override {
BlankAudioDataCreator* creator = new BlankAudioDataCreator(
aConfig.mChannels, aConfig.mRate);
RefPtr<MediaDataDecoder> decoder =
new BlankMediaDataDecoder<BlankAudioDataCreator>(creator,
aCallback,
--- a/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
@@ -233,38 +233,38 @@ CreateDecoderWrapper(MediaDataDecoderCal
new EMEMediaDataDecoderProxy(thread.forget(), aCallback, aProxy, aTaskQueue));
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
EMEDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
MOZ_ASSERT(aConfig.mCrypto.mValid);
if (SupportsMimeType(aConfig.mMimeType, nullptr)) {
// GMP decodes. Assume that means it can decrypt too.
- RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aCallback, mProxy, aVideoTaskQueue);
+ RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aCallback, mProxy, aTaskQueue);
wrapper->SetProxyTarget(new EMEVideoDecoder(mProxy,
aConfig,
aLayersBackend,
aImageContainer,
- aVideoTaskQueue,
+ aTaskQueue,
wrapper->Callback()));
return wrapper.forget();
}
MOZ_ASSERT(mPDM);
RefPtr<MediaDataDecoder> decoder(
mPDM->CreateDecoder(aConfig,
- aVideoTaskQueue,
+ aTaskQueue,
aCallback,
aDiagnostics,
aLayersBackend,
aImageContainer));
if (!decoder) {
return nullptr;
}
@@ -272,35 +272,35 @@ EMEDecoderModule::CreateVideoDecoder(con
aCallback,
mProxy,
AbstractThread::GetCurrent()->AsTaskQueue()));
return emeDecoder.forget();
}
already_AddRefed<MediaDataDecoder>
EMEDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
MOZ_ASSERT(aConfig.mCrypto.mValid);
if (SupportsMimeType(aConfig.mMimeType, nullptr)) {
// GMP decodes. Assume that means it can decrypt too.
- RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aCallback, mProxy, aAudioTaskQueue);
+ RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aCallback, mProxy, aTaskQueue);
wrapper->SetProxyTarget(new EMEAudioDecoder(mProxy,
aConfig,
- aAudioTaskQueue,
+ aTaskQueue,
wrapper->Callback()));
return wrapper.forget();
}
MOZ_ASSERT(mPDM);
RefPtr<MediaDataDecoder> decoder(
- mPDM->CreateDecoder(aConfig, aAudioTaskQueue, aCallback, aDiagnostics));
+ mPDM->CreateDecoder(aConfig, aTaskQueue, aCallback, aDiagnostics));
if (!decoder) {
return nullptr;
}
RefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(decoder,
aCallback,
mProxy,
AbstractThread::GetCurrent()->AsTaskQueue()));
--- a/dom/media/platforms/agnostic/eme/EMEDecoderModule.h
+++ b/dom/media/platforms/agnostic/eme/EMEDecoderModule.h
@@ -24,24 +24,24 @@ public:
virtual ~EMEDecoderModule();
protected:
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
ConversionRequired
DecoderNeedsConversion(const TrackInfo& aConfig) const override;
bool
SupportsMimeType(const nsACString& aMimeType,
--- a/dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp
@@ -44,17 +44,17 @@ CreateDecoderWrapper(MediaDataDecoderCal
RefPtr<MediaDataDecoderProxy> decoder(new MediaDataDecoderProxy(thread.forget(), aCallback));
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
GMPDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
if (!aConfig.mMimeType.EqualsLiteral("video/avc")) {
return nullptr;
}
if (aDiagnostics) {
@@ -63,41 +63,41 @@ GMPDecoderModule::CreateVideoDecoder(con
aDiagnostics->SetGMP(preferredGMP.value());
}
}
RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aCallback);
wrapper->SetProxyTarget(new GMPVideoDecoder(aConfig,
aLayersBackend,
aImageContainer,
- aVideoTaskQueue,
+ aTaskQueue,
wrapper->Callback()));
return wrapper.forget();
}
already_AddRefed<MediaDataDecoder>
GMPDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
if (!aConfig.mMimeType.EqualsLiteral("audio/mp4a-latm")) {
return nullptr;
}
if (aDiagnostics) {
const Maybe<nsCString> preferredGMP = PreferredGMP(aConfig.mMimeType);
if (preferredGMP.isSome()) {
aDiagnostics->SetGMP(preferredGMP.value());
}
}
RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aCallback);
wrapper->SetProxyTarget(new GMPAudioDecoder(aConfig,
- aAudioTaskQueue,
+ aTaskQueue,
wrapper->Callback()));
return wrapper.forget();
}
PlatformDecoderModule::ConversionRequired
GMPDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
{
// GMPVideoCodecType::kGMPVideoCodecH264 specifies that encoded frames must be in AVCC format.
--- a/dom/media/platforms/agnostic/gmp/GMPDecoderModule.h
+++ b/dom/media/platforms/agnostic/gmp/GMPDecoderModule.h
@@ -31,24 +31,24 @@ public:
virtual ~GMPDecoderModule();
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
ConversionRequired
DecoderNeedsConversion(const TrackInfo& aConfig) const override;
bool
SupportsMimeType(const nsACString& aMimeType,
--- a/dom/media/platforms/android/AndroidDecoderModule.cpp
+++ b/dom/media/platforms/android/AndroidDecoderModule.cpp
@@ -264,33 +264,33 @@ AndroidDecoderModule::SupportsMimeType(c
// Accessing a stale local reference leading to a SIGSEGV crash.
// To avoid this we check for wav types here.
if (aMimeType.EqualsLiteral("audio/x-wav") ||
aMimeType.EqualsLiteral("audio/wave; codecs=1") ||
aMimeType.EqualsLiteral("audio/wave; codecs=6") ||
aMimeType.EqualsLiteral("audio/wave; codecs=7") ||
aMimeType.EqualsLiteral("audio/wave; codecs=65534")) {
return false;
- }
+ }
if ((VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP8) &&
!GetFeatureStatus(nsIGfxInfo::FEATURE_VP8_HW_DECODE)) ||
(VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP9) &&
!GetFeatureStatus(nsIGfxInfo::FEATURE_VP9_HW_DECODE))) {
return false;
}
return widget::HardwareCodecCapabilityUtils::FindDecoderCodecInfoForMimeType(
nsCString(TranslateMimeType(aMimeType)));
}
already_AddRefed<MediaDataDecoder>
AndroidDecoderModule::CreateVideoDecoder(
const VideoInfo& aConfig, layers::LayersBackend aLayersBackend,
- layers::ImageContainer* aImageContainer, FlushableTaskQueue* aVideoTaskQueue,
+ layers::ImageContainer* aImageContainer, TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
MediaFormat::LocalRef format;
NS_ENSURE_SUCCESS(MediaFormat::CreateVideoFormat(
TranslateMimeType(aConfig.mMimeType),
aConfig.mDisplay.width,
@@ -300,17 +300,17 @@ AndroidDecoderModule::CreateVideoDecoder
RefPtr<MediaDataDecoder> decoder =
new VideoDataDecoder(aConfig, format, aCallback, aImageContainer);
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
AndroidDecoderModule::CreateAudioDecoder(
- const AudioInfo& aConfig, FlushableTaskQueue* aAudioTaskQueue,
+ const AudioInfo& aConfig, TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
MOZ_ASSERT(aConfig.mBitDepth == 16, "We only handle 16-bit audio!");
MediaFormat::LocalRef format;
LOG("CreateAudioFormat with mimeType=%s, mRate=%d, channels=%d",
--- a/dom/media/platforms/android/AndroidDecoderModule.h
+++ b/dom/media/platforms/android/AndroidDecoderModule.h
@@ -20,23 +20,23 @@ namespace mozilla {
typedef std::deque<RefPtr<MediaRawData>> SampleQueue;
class AndroidDecoderModule : public PlatformDecoderModule {
public:
already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
AndroidDecoderModule() {}
virtual ~AndroidDecoderModule() {}
bool SupportsMimeType(const nsACString& aMimeType,
--- a/dom/media/platforms/apple/AppleDecoderModule.cpp
+++ b/dom/media/platforms/apple/AppleDecoderModule.cpp
@@ -72,49 +72,49 @@ AppleDecoderModule::Startup()
}
return NS_OK;
}
already_AddRefed<MediaDataDecoder>
AppleDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
RefPtr<MediaDataDecoder> decoder;
if (sIsVDAAvailable && (!sIsVTHWAvailable || MediaPrefs::AppleForceVDA())) {
decoder =
AppleVDADecoder::CreateVDADecoder(aConfig,
- aVideoTaskQueue,
+ aTaskQueue,
aCallback,
aImageContainer);
if (decoder) {
return decoder.forget();
}
}
// We fallback here if VDA isn't available, or is available but isn't
// supported by the current platform.
if (sIsVTAvailable) {
decoder =
- new AppleVTDecoder(aConfig, aVideoTaskQueue, aCallback, aImageContainer);
+ new AppleVTDecoder(aConfig, aTaskQueue, aCallback, aImageContainer);
}
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
AppleDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
RefPtr<MediaDataDecoder> decoder =
- new AppleATDecoder(aConfig, aAudioTaskQueue, aCallback);
+ new AppleATDecoder(aConfig, aTaskQueue, aCallback);
return decoder.forget();
}
bool
AppleDecoderModule::SupportsMimeType(const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const
{
return (sIsCoreMediaAvailable &&
--- a/dom/media/platforms/apple/AppleDecoderModule.h
+++ b/dom/media/platforms/apple/AppleDecoderModule.h
@@ -18,24 +18,24 @@ public:
nsresult Startup() override;
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
bool SupportsMimeType(const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const override;
ConversionRequired
DecoderNeedsConversion(const TrackInfo& aConfig) const override;
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -10,17 +10,17 @@
#include "TimeUnits.h"
#define MAX_CHANNELS 16
namespace mozilla
{
FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(FFmpegLibWrapper* aLib,
- FlushableTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
+ TaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
const AudioInfo& aConfig)
: FFmpegDataDecoder(aLib, aTaskQueue, aCallback, GetCodecId(aConfig.mMimeType))
{
MOZ_COUNT_CTOR(FFmpegAudioDecoder);
// Use a new MediaByteBuffer as the object will be modified during initialization.
mExtraData = new MediaByteBuffer;
mExtraData->AppendElements(*aConfig.mCodecSpecificConfig);
}
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h
@@ -16,17 +16,17 @@ namespace mozilla
template <int V> class FFmpegAudioDecoder
{
};
template <>
class FFmpegAudioDecoder<LIBAV_VER> : public FFmpegDataDecoder<LIBAV_VER>
{
public:
- FFmpegAudioDecoder(FFmpegLibWrapper* aLib, FlushableTaskQueue* aTaskQueue,
+ FFmpegAudioDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
const AudioInfo& aConfig);
virtual ~FFmpegAudioDecoder();
RefPtr<InitPromise> Init() override;
void InitCodecContext() override;
static AVCodecID GetCodecId(const nsACString& aMimeType);
const char* GetDescriptionName() const override
--- a/dom/media/platforms/ffmpeg/FFmpegDecoderModule.h
+++ b/dom/media/platforms/ffmpeg/FFmpegDecoderModule.h
@@ -29,37 +29,37 @@ public:
explicit FFmpegDecoderModule(FFmpegLibWrapper* aLib) : mLib(aLib) {}
virtual ~FFmpegDecoderModule() {}
already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override
{
RefPtr<MediaDataDecoder> decoder =
- new FFmpegVideoDecoder<V>(mLib, aVideoTaskQueue, aCallback, aConfig,
+ new FFmpegVideoDecoder<V>(mLib, aTaskQueue, aCallback, aConfig,
aImageContainer);
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override
{
#ifdef USING_MOZFFVPX
return nullptr;
#else
RefPtr<MediaDataDecoder> decoder =
- new FFmpegAudioDecoder<V>(mLib, aAudioTaskQueue, aCallback, aConfig);
+ new FFmpegAudioDecoder<V>(mLib, aTaskQueue, aCallback, aConfig);
return decoder.forget();
#endif
}
bool SupportsMimeType(const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const override
{
AVCodecID videoCodec = FFmpegVideoDecoder<V>::GetCodecId(aMimeType);
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -96,17 +96,17 @@ FFmpegVideoDecoder<LIBAV_VER>::PtsCorrec
{
mNumFaultyPts = 0;
mNumFaultyDts = 0;
mLastPts = INT64_MIN;
mLastDts = INT64_MIN;
}
FFmpegVideoDecoder<LIBAV_VER>::FFmpegVideoDecoder(FFmpegLibWrapper* aLib,
- FlushableTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
+ TaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
const VideoInfo& aConfig,
ImageContainer* aImageContainer)
: FFmpegDataDecoder(aLib, aTaskQueue, aCallback, GetCodecId(aConfig.mMimeType))
, mImageContainer(aImageContainer)
, mInfo(aConfig)
, mCodecParser(nullptr)
, mLastInputDts(INT64_MIN)
{
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
@@ -22,17 +22,17 @@ class FFmpegVideoDecoder : public FFmpeg
template <>
class FFmpegVideoDecoder<LIBAV_VER> : public FFmpegDataDecoder<LIBAV_VER>
{
typedef mozilla::layers::Image Image;
typedef mozilla::layers::ImageContainer ImageContainer;
public:
- FFmpegVideoDecoder(FFmpegLibWrapper* aLib, FlushableTaskQueue* aTaskQueue,
+ FFmpegVideoDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
const VideoInfo& aConfig,
ImageContainer* aImageContainer);
virtual ~FFmpegVideoDecoder();
RefPtr<InitPromise> Init() override;
void InitCodecContext() override;
const char* GetDescriptionName() const override
--- a/dom/media/platforms/gonk/GonkDecoderModule.cpp
+++ b/dom/media/platforms/gonk/GonkDecoderModule.cpp
@@ -17,29 +17,29 @@ GonkDecoderModule::GonkDecoderModule()
GonkDecoderModule::~GonkDecoderModule()
{
}
already_AddRefed<MediaDataDecoder>
GonkDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
mozilla::layers::LayersBackend aLayersBackend,
mozilla::layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
RefPtr<MediaDataDecoder> decoder =
new GonkMediaDataDecoder(new GonkVideoDecoderManager(aImageContainer, aConfig),
aCallback);
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
GonkDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
RefPtr<MediaDataDecoder> decoder =
new GonkMediaDataDecoder(new GonkAudioDecoderManager(aConfig),
aCallback);
return decoder.forget();
}
--- a/dom/media/platforms/gonk/GonkDecoderModule.h
+++ b/dom/media/platforms/gonk/GonkDecoderModule.h
@@ -16,24 +16,24 @@ public:
GonkDecoderModule();
virtual ~GonkDecoderModule();
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const VideoInfo& aConfig,
mozilla::layers::LayersBackend aLayersBackend,
mozilla::layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
ConversionRequired
DecoderNeedsConversion(const TrackInfo& aConfig) const override;
bool SupportsMimeType(const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const override;
--- a/dom/media/platforms/omx/OmxDecoderModule.cpp
+++ b/dom/media/platforms/omx/OmxDecoderModule.cpp
@@ -10,27 +10,27 @@
#include "OmxPlatformLayer.h"
namespace mozilla {
already_AddRefed<MediaDataDecoder>
OmxDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
mozilla::layers::LayersBackend aLayersBackend,
mozilla::layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
RefPtr<OmxDataDecoder> decoder = new OmxDataDecoder(aConfig, aCallback, aImageContainer);
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
OmxDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
RefPtr<OmxDataDecoder> decoder = new OmxDataDecoder(aConfig, aCallback, nullptr);
return decoder.forget();
}
PlatformDecoderModule::ConversionRequired
--- a/dom/media/platforms/omx/OmxDecoderModule.h
+++ b/dom/media/platforms/omx/OmxDecoderModule.h
@@ -12,23 +12,23 @@
namespace mozilla {
class OmxDecoderModule : public PlatformDecoderModule {
public:
already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const VideoInfo& aConfig,
mozilla::layers::LayersBackend aLayersBackend,
mozilla::layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
bool SupportsMimeType(const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const override;
ConversionRequired DecoderNeedsConversion(const TrackInfo& aConfig) const override;
};
--- a/dom/media/platforms/wmf/WMFDecoderModule.cpp
+++ b/dom/media/platforms/wmf/WMFDecoderModule.cpp
@@ -73,50 +73,50 @@ WMFDecoderModule::Startup()
mWMFInitialized = SUCCEEDED(wmf::MFStartup());
return mWMFInitialized ? NS_OK : NS_ERROR_FAILURE;
}
already_AddRefed<MediaDataDecoder>
WMFDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
nsAutoPtr<WMFVideoMFTManager> manager(
new WMFVideoMFTManager(aConfig,
aLayersBackend,
aImageContainer,
sDXVAEnabled));
if (!manager->Init()) {
return nullptr;
}
RefPtr<MediaDataDecoder> decoder =
- new WMFMediaDataDecoder(manager.forget(), aVideoTaskQueue, aCallback);
+ new WMFMediaDataDecoder(manager.forget(), aTaskQueue, aCallback);
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
WMFDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
{
nsAutoPtr<WMFAudioMFTManager> manager(new WMFAudioMFTManager(aConfig));
if (!manager->Init()) {
return nullptr;
}
RefPtr<MediaDataDecoder> decoder =
- new WMFMediaDataDecoder(manager.forget(), aAudioTaskQueue, aCallback);
+ new WMFMediaDataDecoder(manager.forget(), aTaskQueue, aCallback);
return decoder.forget();
}
static bool
CanCreateMFTDecoder(const GUID& aGuid)
{
if (FAILED(wmf::MFStartup())) {
return false;
--- a/dom/media/platforms/wmf/WMFDecoderModule.h
+++ b/dom/media/platforms/wmf/WMFDecoderModule.h
@@ -18,23 +18,23 @@ public:
// Initializes the module, loads required dynamic libraries, etc.
nsresult Startup() override;
already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const AudioInfo& aConfig,
- FlushableTaskQueue* aAudioTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics) override;
bool SupportsMimeType(const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const override;
ConversionRequired
DecoderNeedsConversion(const TrackInfo& aConfig) const override;
--- a/dom/media/platforms/wrappers/H264Converter.cpp
+++ b/dom/media/platforms/wrappers/H264Converter.cpp
@@ -14,25 +14,25 @@
namespace mozilla
{
H264Converter::H264Converter(PlatformDecoderModule* aPDM,
const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics)
: mPDM(aPDM)
, mOriginalConfig(aConfig)
, mCurrentConfig(aConfig)
, mLayersBackend(aLayersBackend)
, mImageContainer(aImageContainer)
- , mVideoTaskQueue(aVideoTaskQueue)
+ , mTaskQueue(aTaskQueue)
, mCallback(aCallback)
, mDecoder(nullptr)
, mNeedAVCC(aPDM->DecoderNeedsConversion(aConfig) == PlatformDecoderModule::kNeedAVCC)
, mLastError(NS_OK)
{
CreateDecoder(aDiagnostics);
}
@@ -144,17 +144,17 @@ H264Converter::CreateDecoder(DecoderDoct
// When using a decoder handling AnnexB, we get here only once from the
// constructor. We do want to get the dimensions extracted from the SPS.
mOriginalConfig = mCurrentConfig;
}
mDecoder = mPDM->CreateVideoDecoder(mNeedAVCC ? mCurrentConfig : mOriginalConfig,
mLayersBackend,
mImageContainer,
- mVideoTaskQueue,
+ mTaskQueue,
mCallback,
aDiagnostics);
if (!mDecoder) {
mLastError = NS_ERROR_FAILURE;
return NS_ERROR_FAILURE;
}
return NS_OK;
}
--- a/dom/media/platforms/wrappers/H264Converter.h
+++ b/dom/media/platforms/wrappers/H264Converter.h
@@ -20,17 +20,17 @@ namespace mozilla {
class H264Converter : public MediaDataDecoder {
public:
H264Converter(PlatformDecoderModule* aPDM,
const VideoInfo& aConfig,
layers::LayersBackend aLayersBackend,
layers::ImageContainer* aImageContainer,
- FlushableTaskQueue* aVideoTaskQueue,
+ TaskQueue* aTaskQueue,
MediaDataDecoderCallback* aCallback,
DecoderDoctorDiagnostics* aDiagnostics);
virtual ~H264Converter();
RefPtr<InitPromise> Init() override;
nsresult Input(MediaRawData* aSample) override;
nsresult Flush() override;
nsresult Drain() override;
@@ -60,17 +60,17 @@ private:
void OnDecoderInitDone(const TrackType aTrackType);
void OnDecoderInitFailed(MediaDataDecoder::DecoderFailureReason aReason);
RefPtr<PlatformDecoderModule> mPDM;
VideoInfo mOriginalConfig;
VideoInfo mCurrentConfig;
layers::LayersBackend mLayersBackend;
RefPtr<layers::ImageContainer> mImageContainer;
- RefPtr<FlushableTaskQueue> mVideoTaskQueue;
+ const RefPtr<TaskQueue> mTaskQueue;
nsTArray<RefPtr<MediaRawData>> mMediaRawSamples;
MediaDataDecoderCallback* mCallback;
RefPtr<MediaDataDecoder> mDecoder;
MozPromiseRequestHolder<InitPromise> mInitPromiseRequest;
bool mNeedAVCC;
nsresult mLastError;
};