Bug 1271508. Part 2 - rename functions to match those of FFmpegAudioDecoder so that it is easier to extract common code into the parent class. r=jya.
MozReview-Commit-ID: K4GPCtj6pFG
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -154,20 +154,18 @@ FFmpegVideoDecoder<LIBAV_VER>::InitCodec
mCodecParser = mLib->av_parser_init(mCodecID);
if (mCodecParser) {
mCodecParser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
}
}
FFmpegVideoDecoder<LIBAV_VER>::DecodeResult
-FFmpegVideoDecoder<LIBAV_VER>::DoDecodeFrame(MediaRawData* aSample)
+FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
{
- MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
-
uint8_t* inputData = const_cast<uint8_t*>(aSample->Data());
size_t inputSize = aSample->Size();
#if LIBAVCODEC_VERSION_MAJOR >= 54
if (inputSize && mCodecParser && (mCodecID == AV_CODEC_ID_VP8
#if LIBAVCODEC_VERSION_MAJOR >= 55
|| mCodecID == AV_CODEC_ID_VP9
#endif
@@ -182,39 +180,37 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecodeF
aSample->mOffset);
if (size_t(len) > inputSize) {
mCallback->Error();
return DecodeResult::DECODE_ERROR;
}
inputData += len;
inputSize -= len;
if (size) {
- switch (DoDecodeFrame(aSample, data, size)) {
+ switch (DoDecode(aSample, data, size)) {
case DecodeResult::DECODE_ERROR:
return DecodeResult::DECODE_ERROR;
case DecodeResult::DECODE_FRAME:
gotFrame = true;
break;
default:
break;
}
}
}
return gotFrame ? DecodeResult::DECODE_FRAME : DecodeResult::DECODE_NO_FRAME;
}
#endif
- return DoDecodeFrame(aSample, inputData, inputSize);
+ return DoDecode(aSample, inputData, inputSize);
}
FFmpegVideoDecoder<LIBAV_VER>::DecodeResult
-FFmpegVideoDecoder<LIBAV_VER>::DoDecodeFrame(MediaRawData* aSample,
- uint8_t* aData, int aSize)
+FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
+ uint8_t* aData, int aSize)
{
- MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
-
AVPacket packet;
mLib->av_init_packet(&packet);
packet.data = aData;
packet.size = aSize;
packet.dts = aSample->mTimecode;
packet.pts = aSample->mTime;
packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
@@ -312,45 +308,39 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecodeF
}
mCallback->Output(v);
return DecodeResult::DECODE_FRAME;
}
return DecodeResult::DECODE_NO_FRAME;
}
void
-FFmpegVideoDecoder<LIBAV_VER>::DecodeFrame(MediaRawData* aSample)
+FFmpegVideoDecoder<LIBAV_VER>::Decode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
-
- if (DoDecodeFrame(aSample) != DecodeResult::DECODE_ERROR &&
- mTaskQueue->IsEmpty()) {
+ if (DoDecode(aSample) != DecodeResult::DECODE_ERROR && mTaskQueue->IsEmpty()) {
mCallback->InputExhausted();
}
}
nsresult
FFmpegVideoDecoder<LIBAV_VER>::Input(MediaRawData* aSample)
{
- nsCOMPtr<nsIRunnable> runnable(
- NewRunnableMethod<RefPtr<MediaRawData>>(
- this, &FFmpegVideoDecoder<LIBAV_VER>::DecodeFrame,
- RefPtr<MediaRawData>(aSample)));
- mTaskQueue->Dispatch(runnable.forget());
-
+ mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
+ this, &FFmpegVideoDecoder::Decode, aSample));
return NS_OK;
}
void
FFmpegVideoDecoder<LIBAV_VER>::ProcessDrain()
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
RefPtr<MediaRawData> empty(new MediaRawData());
empty->mTimecode = mPtsContext.LastDts();
- while (DoDecodeFrame(empty) == DecodeResult::DECODE_FRAME) {
+ while (DoDecode(empty) == DecodeResult::DECODE_FRAME) {
}
mCallback->DrainComplete();
}
void
FFmpegVideoDecoder<LIBAV_VER>::ProcessFlush()
{
mPtsContext.Reset();
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
@@ -50,19 +50,19 @@ public:
return "ffvpx video decoder";
#else
return "ffmpeg video decoder";
#endif
}
static AVCodecID GetCodecId(const nsACString& aMimeType);
private:
- void DecodeFrame(MediaRawData* aSample);
- DecodeResult DoDecodeFrame(MediaRawData* aSample);
- DecodeResult DoDecodeFrame(MediaRawData* aSample, uint8_t* aData, int aSize);
+ void Decode(MediaRawData* aSample);
+ DecodeResult DoDecode(MediaRawData* aSample);
+ DecodeResult DoDecode(MediaRawData* aSample, uint8_t* aData, int aSize);
void DoDrain();
void OutputDelayedFrames();
/**
* This method allocates a buffer for FFmpeg's decoder, wrapped in an Image.
* Currently it only supports Planar YUV420, which appears to be the only
* non-hardware accelerated image format that FFmpeg's H264 decoder is
* capable of outputting.