Bug 1299072: P15. Provide additional error details for most remaining decoders. r?gerald draft
author Jean-Yves Avenard <jyavenard@mozilla.com>
Tue, 13 Sep 2016 11:15:36 +1000
changeset 412880 890847a8781ee2f9ce59622f5e50a778ead73f53
parent 412879 f13fb6022a04afb1918343d31ef6493f6b3d0401
child 412881 838e0c63e017bd15ba3a6d20ff84cd76ec1eb3ef
push id 29276
push user bmo:jyavenard@mozilla.com
push date Tue, 13 Sep 2016 03:29:20 +0000
reviewers gerald
bugs 1299072
milestone 51.0a1
Bug 1299072: P15. Provide additional error details for most remaining decoders. r?gerald Additionally, mark non-fatal decoding errors as such. Due to the complexity of WMF decoder error handling, that work is deferred to a follow-up bug. MozReview-Commit-ID: KHWORM8899c
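The same conversion is applied throughout the patch: each decoder's DoDecode() now returns a MediaResult carrying the most specific nsresult available (NS_ERROR_DOM_MEDIA_DECODE_ERR, NS_ERROR_DOM_MEDIA_OVERFLOW_ERR, NS_ERROR_OUT_OF_MEMORY, ...), and ProcessDecode() forwards any failure to the callback unchanged instead of collapsing everything into one generic fatal error. Below is a minimal compilable sketch of that control flow; Code, Result, Callback, and Sample are simplified stand-ins for illustration, not the real Gecko types (MediaResult, MediaDataDecoderCallback, MediaRawData).

#include <string>

// Stand-in for the nsresult codes used in the patch.
enum class Code { Ok, DecodeErr, FatalErr, OutOfMemory, OverflowErr };

// Stand-in for MediaResult: an error code plus context (e.g. __func__).
struct Result {
  Code mCode;
  std::string mWhere;
  bool Failed() const { return mCode != Code::Ok; }
};

// Stand-in for MediaDataDecoderCallback.
struct Callback {
  void Error(const Result&) { /* surface the detailed error */ }
  void InputExhausted() { /* request more input */ }
};

struct Sample {}; // stand-in for MediaRawData

Result DoDecode(Sample&) {
  // Return the most specific code for each failure path, e.g.
  //   return {Code::OutOfMemory, __func__};
  // on allocation failure, rather than a generic fatal error.
  return {Code::Ok, __func__};
}

void ProcessDecode(Sample& s, Callback& cb) {
  Result rv = DoDecode(s);
  if (rv.Failed()) {
    cb.Error(rv); // propagate the detailed error as-is
    return;
  }
  cb.InputExhausted();
}

int main() {
  Sample s;
  Callback cb;
  ProcessDecode(s, cb);
}

Where a decoder still needs to distinguish success-with-frame from success-without-frame (the FFmpeg video decoder's drain loop below), the patch threads a bool* aGotFrame out-parameter through DoDecode() rather than encoding that state in the return value.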
dom/media/platforms/agnostic/OpusDecoder.cpp
dom/media/platforms/agnostic/OpusDecoder.h
dom/media/platforms/agnostic/TheoraDecoder.cpp
dom/media/platforms/agnostic/TheoraDecoder.h
dom/media/platforms/agnostic/VPXDecoder.cpp
dom/media/platforms/agnostic/VPXDecoder.h
dom/media/platforms/agnostic/VorbisDecoder.cpp
dom/media/platforms/agnostic/VorbisDecoder.h
dom/media/platforms/agnostic/WAVDecoder.cpp
dom/media/platforms/agnostic/WAVDecoder.h
dom/media/platforms/apple/AppleVTDecoder.cpp
dom/media/platforms/apple/AppleVTDecoder.h
dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h
dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegDataDecoder.h
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
--- a/dom/media/platforms/agnostic/OpusDecoder.cpp
+++ b/dom/media/platforms/agnostic/OpusDecoder.cpp
@@ -146,91 +146,83 @@ OpusDataDecoder::Input(MediaRawData* aSa
 
 void
 OpusDataDecoder::ProcessDecode(MediaRawData* aSample)
 {
   if (mIsFlushing) {
     return;
   }
 
-  DecodeError err = DoDecode(aSample);
-  switch (err) {
-    case DecodeError::FATAL_ERROR:
-      mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
-                                        __func__));
-      return;
-    case DecodeError::DECODE_ERROR:
-      mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
-                                        __func__));
-      break;
-    case DecodeError::DECODE_SUCCESS:
-      mCallback->InputExhausted();
-      break;
+  MediaResult rv = DoDecode(aSample);
+  if (NS_FAILED(rv)) {
+    mCallback->Error(rv);
+    return;
   }
+  mCallback->InputExhausted();
 }
 
-OpusDataDecoder::DecodeError
+MediaResult
 OpusDataDecoder::DoDecode(MediaRawData* aSample)
 {
   int64_t aDiscardPadding = 0;
   if (aSample->mExtraData) {
     aDiscardPadding = BigEndian::readInt64(aSample->mExtraData->Elements());
   }
   uint32_t channels = mOpusParser->mChannels;
 
   if (mPaddingDiscarded) {
     // Discard padding should be used only on the final packet, so
     // decoding after a padding discard is invalid.
     OPUS_DEBUG("Opus error, discard padding on interstitial packet");
-    return FATAL_ERROR;
+    return NS_ERROR_DOM_MEDIA_FATAL_ERR;
   }
 
   if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
     // We are starting a new block.
     mFrames = 0;
     mLastFrameTime = Some(aSample->mTime);
   }
 
   // Maximum value is 63*2880, so there's no chance of overflow.
   int32_t frames_number = opus_packet_get_nb_frames(aSample->Data(),
                                                     aSample->Size());
   if (frames_number <= 0) {
     OPUS_DEBUG("Invalid packet header: r=%ld length=%ld",
                frames_number, aSample->Size());
-    return FATAL_ERROR;
+    return NS_ERROR_DOM_MEDIA_DECODE_ERR;
   }
 
   int32_t samples = opus_packet_get_samples_per_frame(aSample->Data(),
                                            opus_int32(mOpusParser->mRate));
 
 
   // A valid Opus packet must be between 2.5 and 120 ms long (48kHz).
   int32_t frames = frames_number*samples;
   if (frames < 120 || frames > 5760) {
     OPUS_DEBUG("Invalid packet frames: %ld", frames);
-    return FATAL_ERROR;
+    return NS_ERROR_DOM_MEDIA_DECODE_ERR;
   }
 
   AlignedAudioBuffer buffer(frames * channels);
   if (!buffer) {
-    return FATAL_ERROR;
+    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
   }
 
   // Decode to the appropriate sample type.
 #ifdef MOZ_SAMPLE_TYPE_FLOAT32
   int ret = opus_multistream_decode_float(mOpusDecoder,
                                           aSample->Data(), aSample->Size(),
                                           buffer.get(), frames, false);
 #else
   int ret = opus_multistream_decode(mOpusDecoder,
                                     aSample->Data(), aSample->Size(),
                                     buffer.get(), frames, false);
 #endif
   if (ret < 0) {
-    return DECODE_ERROR;
+    return NS_ERROR_DOM_MEDIA_DECODE_ERR;
   }
   NS_ASSERTION(ret == frames, "Opus decoded too few audio samples");
   CheckedInt64 startTime = aSample->mTime;
 
   // Trim the initial frames while the decoder is settling.
   if (mSkip > 0) {
     int32_t skipFrames = std::min<int32_t>(mSkip, frames);
     int32_t keepFrames = frames - skipFrames;
@@ -241,31 +233,31 @@ OpusDataDecoder::DoDecode(MediaRawData* 
     startTime = startTime + FramesToUsecs(skipFrames, mOpusParser->mRate);
     frames = keepFrames;
     mSkip -= skipFrames;
   }
 
   if (aDiscardPadding < 0) {
     // Negative discard padding is invalid.
     OPUS_DEBUG("Opus error, negative discard padding");
-    return FATAL_ERROR;
+    return NS_ERROR_DOM_MEDIA_FATAL_ERR;
   }
   if (aDiscardPadding > 0) {
     OPUS_DEBUG("OpusDecoder discardpadding %" PRId64 "", aDiscardPadding);
     CheckedInt64 discardFrames =
       TimeUnitToFrames(media::TimeUnit::FromNanoseconds(aDiscardPadding),
                        mOpusParser->mRate);
     if (!discardFrames.isValid()) {
       NS_WARNING("Int overflow in DiscardPadding");
-      return FATAL_ERROR;
+      return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     }
     if (discardFrames.value() > frames) {
       // Discarding more than the entire packet is invalid.
       OPUS_DEBUG("Opus error, discard padding larger than packet");
-      return FATAL_ERROR;
+      return NS_ERROR_DOM_MEDIA_FATAL_ERR;
     }
     OPUS_DEBUG("Opus decoder discarding %d of %d frames",
         int32_t(discardFrames.value()), frames);
     // Padding discard is only supposed to happen on the final packet.
     // Record the discard so we can return an error if another packet is
     // decoded.
     mPaddingDiscarded = true;
     int32_t keepFrames = frames - discardFrames.value();
@@ -290,35 +282,35 @@ OpusDataDecoder::DoDecode(MediaRawData* 
       buffer[i] = static_cast<AudioDataValue>(MOZ_CLIP_TO_15(val));
     }
   }
 #endif
 
   CheckedInt64 duration = FramesToUsecs(frames, mOpusParser->mRate);
   if (!duration.isValid()) {
     NS_WARNING("OpusDataDecoder: Int overflow converting WebM audio duration");
-    return FATAL_ERROR;
+    return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
   }
   CheckedInt64 time =
     startTime - FramesToUsecs(mOpusParser->mPreSkip, mOpusParser->mRate) +
     FramesToUsecs(mFrames, mOpusParser->mRate);
   if (!time.isValid()) {
     NS_WARNING("OpusDataDecoder: Int overflow shifting tstamp by codec delay");
-    return FATAL_ERROR;
+    return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
   };
 
   mCallback->Output(new AudioData(aSample->mOffset,
                                   time.value(),
                                   duration.value(),
                                   frames,
                                   Move(buffer),
                                   mOpusParser->mChannels,
                                   mOpusParser->mRate));
   mFrames += frames;
-  return DECODE_SUCCESS;
+  return NS_OK;
 }
 
 void
 OpusDataDecoder::ProcessDrain()
 {
   mCallback->DrainComplete();
 }
 
--- a/dom/media/platforms/agnostic/OpusDecoder.h
+++ b/dom/media/platforms/agnostic/OpusDecoder.h
@@ -36,26 +36,20 @@ public:
   // Pack pre-skip/CodecDelay, given in microseconds, into a
   // MediaByteBuffer. The decoder expects this value to come
   // from the container (if any) and to precede the OpusHead
   // block in the CodecSpecificConfig buffer to verify the
   // values match.
   static void AppendCodecDelay(MediaByteBuffer* config, uint64_t codecDelayUS);
 
 private:
-  enum DecodeError {
-    DECODE_SUCCESS,
-    DECODE_ERROR,
-    FATAL_ERROR
-  };
-
   nsresult DecodeHeader(const unsigned char* aData, size_t aLength);
 
   void ProcessDecode(MediaRawData* aSample);
-  DecodeError DoDecode(MediaRawData* aSample);
+  MediaResult DoDecode(MediaRawData* aSample);
   void ProcessDrain();
 
   const AudioInfo& mInfo;
   const RefPtr<TaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
 
   // Opus decoder state
   nsAutoPtr<OpusParser> mOpusParser;
--- a/dom/media/platforms/agnostic/TheoraDecoder.cpp
+++ b/dom/media/platforms/agnostic/TheoraDecoder.cpp
@@ -118,17 +118,17 @@ TheoraDecoder::DoDecodeHeader(const unsi
 
   int r = th_decode_headerin(&mTheoraInfo,
                              &mTheoraComment,
                              &mTheoraSetupInfo,
                              &pkt);
   return r > 0 ? NS_OK : NS_ERROR_FAILURE;
 }
 
-int
+MediaResult
 TheoraDecoder::DoDecode(MediaRawData* aSample)
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
   const unsigned char* aData = aSample->Data();
   size_t aLength = aSample->Size();
 
   bool bos = mPacketCount == 0;
@@ -176,36 +176,36 @@ TheoraDecoder::DoDecode(MediaRawData* aS
                                    aSample->mKeyframe,
                                    aSample->mTimecode,
                                    mInfo.ScaledImageRect(mTheoraInfo.frame_width,
                                                          mTheoraInfo.frame_height));
     if (!v) {
       LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
           mTheoraInfo.frame_width, mTheoraInfo.frame_height, mInfo.mDisplay.width, mInfo.mDisplay.height,
           mInfo.mImage.width, mInfo.mImage.height);
-      return -1;
+      return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
     }
     mCallback->Output(v);
-    return 0;
+    return NS_OK;
   } else {
     LOG("Theora Decode error: %d", ret);
-    return -1;
+    return NS_ERROR_DOM_MEDIA_DECODE_ERR;
   }
 }
 
 void
 TheoraDecoder::ProcessDecode(MediaRawData* aSample)
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
   if (mIsFlushing) {
     return;
   }
-  if (DoDecode(aSample) == -1) {
-    mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
-                                      __func__));
+  MediaResult rv = DoDecode(aSample);
+  if (NS_FAILED(rv)) {
+    mCallback->Error(rv);
   } else {
     mCallback->InputExhausted();
   }
 }
 
 void
 TheoraDecoder::Input(MediaRawData* aSample)
 {
--- a/dom/media/platforms/agnostic/TheoraDecoder.h
+++ b/dom/media/platforms/agnostic/TheoraDecoder.h
@@ -36,17 +36,17 @@ public:
   {
     return "theora video decoder";
   }
 
 private:
   nsresult DoDecodeHeader(const unsigned char* aData, size_t aLength);
 
   void ProcessDecode(MediaRawData* aSample);
-  int DoDecode(MediaRawData* aSample);
+  MediaResult DoDecode(MediaRawData* aSample);
   void ProcessDrain();
 
   RefPtr<ImageContainer> mImageContainer;
   RefPtr<TaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
   Atomic<bool> mIsFlushing;
 
   // Theora header & decoder state
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -92,17 +92,17 @@ VPXDecoder::Flush()
   mIsFlushing = true;
   nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([this] () {
     // nothing to do for now.
   });
   SyncRunnable::DispatchToThread(mTaskQueue, r);
   mIsFlushing = false;
 }
 
-int
+MediaResult
 VPXDecoder::DoDecode(MediaRawData* aSample)
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 #if defined(DEBUG)
   vpx_codec_stream_info_t si;
   PodZero(&si);
   si.sz = sizeof(si);
   if (mCodec == Codec::VP8) {
@@ -111,17 +111,17 @@ VPXDecoder::DoDecode(MediaRawData* aSamp
     vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), aSample->Data(), aSample->Size(), &si);
   }
   NS_ASSERTION(bool(si.is_kf) == aSample->mKeyframe,
                "VPX Decode Keyframe error sample->mKeyframe and si.si_kf out of sync");
 #endif
 
   if (vpx_codec_err_t r = vpx_codec_decode(&mVPX, aSample->Data(), aSample->Size(), nullptr, 0)) {
     LOG("VPX Decode error: %s", vpx_codec_err_to_string(r));
-    return -1;
+    return NS_ERROR_DOM_MEDIA_DECODE_ERR;
   }
 
   vpx_codec_iter_t  iter = nullptr;
   vpx_image_t      *img;
 
   while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
     NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420 ||
                  img->fmt == VPX_IMG_FMT_I444,
@@ -152,17 +152,17 @@ VPXDecoder::DoDecode(MediaRawData* aSamp
     } else if (img->fmt == VPX_IMG_FMT_I444) {
       b.mPlanes[1].mHeight = img->d_h;
       b.mPlanes[1].mWidth = img->d_w;
 
       b.mPlanes[2].mHeight = img->d_h;
       b.mPlanes[2].mWidth = img->d_w;
     } else {
       LOG("VPX Unknown image format");
-      return -1;
+      return NS_ERROR_DOM_MEDIA_DECODE_ERR;
     }
 
     RefPtr<VideoData> v =
       VideoData::CreateAndCopyData(mInfo,
                                    mImageContainer,
                                    aSample->mOffset,
                                    aSample->mTime,
                                    aSample->mDuration,
@@ -171,33 +171,33 @@ VPXDecoder::DoDecode(MediaRawData* aSamp
                                    aSample->mTimecode,
                                    mInfo.ScaledImageRect(img->d_w,
                                                          img->d_h));
 
     if (!v) {
       LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
           img->d_w, img->d_h, mInfo.mDisplay.width, mInfo.mDisplay.height,
           mInfo.mImage.width, mInfo.mImage.height);
-      return -1;
+      return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
     }
     mCallback->Output(v);
   }
-  return 0;
+  return NS_OK;
 }
 
 void
 VPXDecoder::ProcessDecode(MediaRawData* aSample)
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
   if (mIsFlushing) {
     return;
   }
-  if (DoDecode(aSample) == -1) {
-    mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
-                                      __func__));
+  MediaResult rv = DoDecode(aSample);
+  if (NS_FAILED(rv)) {
+    mCallback->Error(rv);
   } else {
     mCallback->InputExhausted();
   }
 }
 
 void
 VPXDecoder::Input(MediaRawData* aSample)
 {
--- a/dom/media/platforms/agnostic/VPXDecoder.h
+++ b/dom/media/platforms/agnostic/VPXDecoder.h
@@ -43,17 +43,17 @@ public:
   // identify VPX of the specified type. Does not parse general content type
   // strings, i.e. white space matters.
   static bool IsVPX(const nsACString& aMimeType, uint8_t aCodecMask=VP8|VP9);
   static bool IsVP8(const nsACString& aMimeType);
   static bool IsVP9(const nsACString& aMimeType);
 
 private:
   void ProcessDecode(MediaRawData* aSample);
-  int DoDecode(MediaRawData* aSample);
+  MediaResult DoDecode(MediaRawData* aSample);
   void ProcessDrain();
 
   const RefPtr<ImageContainer> mImageContainer;
   const RefPtr<TaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
   Atomic<bool> mIsFlushing;
 
   // VPx decoder state
--- a/dom/media/platforms/agnostic/VorbisDecoder.cpp
+++ b/dom/media/platforms/agnostic/VorbisDecoder.cpp
@@ -132,25 +132,26 @@ VorbisDataDecoder::Input(MediaRawData* a
 
 void
 VorbisDataDecoder::ProcessDecode(MediaRawData* aSample)
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
   if (mIsFlushing) {
     return;
   }
-  if (DoDecode(aSample) == -1) {
-    mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
-                                      __func__));
+
+  MediaResult rv = DoDecode(aSample);
+  if (NS_FAILED(rv)) {
+    mCallback->Error(rv);
   } else {
     mCallback->InputExhausted();
   }
 }
 
-int
+MediaResult
 VorbisDataDecoder::DoDecode(MediaRawData* aSample)
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
   const unsigned char* aData = aSample->Data();
   size_t aLength = aSample->Size();
   int64_t aOffset = aSample->mOffset;
   uint64_t aTstampUsecs = aSample->mTime;
@@ -163,67 +164,66 @@ VorbisDataDecoder::DoDecode(MediaRawData
     mFrames = 0;
     mLastFrameTime = Some(aSample->mTime);
   }
 
   ogg_packet pkt = InitVorbisPacket(aData, aLength, false, aSample->mEOS,
                                     aSample->mTimecode, mPacketCount++);
 
   if (vorbis_synthesis(&mVorbisBlock, &pkt) != 0) {
-    return -1;
+    return NS_ERROR_DOM_MEDIA_DECODE_ERR;
   }
 
   if (vorbis_synthesis_blockin(&mVorbisDsp,
                                &mVorbisBlock) != 0) {
-    return -1;
+    return NS_ERROR_DOM_MEDIA_DECODE_ERR;
   }
 
   VorbisPCMValue** pcm = 0;
   int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
   if (frames == 0) {
-    mCallback->InputExhausted();
-    return 0;
+    return NS_OK;
   }
   while (frames > 0) {
     uint32_t channels = mVorbisDsp.vi->channels;
     uint32_t rate = mVorbisDsp.vi->rate;
     AlignedAudioBuffer buffer(frames*channels);
     if (!buffer) {
-      return -1;
+      return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
     }
     for (uint32_t j = 0; j < channels; ++j) {
       VorbisPCMValue* channel = pcm[j];
       for (uint32_t i = 0; i < uint32_t(frames); ++i) {
         buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
       }
     }
 
     CheckedInt64 duration = FramesToUsecs(frames, rate);
     if (!duration.isValid()) {
       NS_WARNING("Int overflow converting WebM audio duration");
-      return -1;
+      return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     }
     CheckedInt64 total_duration = FramesToUsecs(mFrames, rate);
     if (!total_duration.isValid()) {
       NS_WARNING("Int overflow converting WebM audio total_duration");
-      return -1;
+      return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     }
 
     CheckedInt64 time = total_duration + aTstampUsecs;
     if (!time.isValid()) {
       NS_WARNING("Int overflow adding total_duration and aTstampUsecs");
-      return -1;
+      return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     };
 
     if (!mAudioConverter) {
       AudioConfig in(AudioConfig::ChannelLayout(channels, VorbisLayout(channels)),
                      rate);
       AudioConfig out(channels, rate);
       if (!in.IsValid() || !out.IsValid()) {
-       return -1;
+       return NS_ERROR_DOM_MEDIA_FATAL_ERR;
       }
       mAudioConverter = MakeUnique<AudioConverter>(in, out);
     }
     MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
     AudioSampleBuffer data(Move(buffer));
     data = mAudioConverter->Process(Move(data));
 
     aTotalFrames += frames;
@@ -231,23 +231,23 @@ VorbisDataDecoder::DoDecode(MediaRawData
                                     time.value(),
                                     duration.value(),
                                     frames,
                                     data.Forget(),
                                     channels,
                                     rate));
     mFrames += frames;
     if (vorbis_synthesis_read(&mVorbisDsp, frames) != 0) {
-      return -1;
+      return NS_ERROR_DOM_MEDIA_DECODE_ERR;
     }
 
     frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
   }
 
-  return aTotalFrames > 0 ? 1 : 0;
+  return NS_OK;
 }
 
 void
 VorbisDataDecoder::ProcessDrain()
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
   mCallback->DrainComplete();
 }
--- a/dom/media/platforms/agnostic/VorbisDecoder.h
+++ b/dom/media/platforms/agnostic/VorbisDecoder.h
@@ -37,17 +37,17 @@ public:
   // Return true if mimetype is Vorbis
   static bool IsVorbis(const nsACString& aMimeType);
   static const AudioConfig::Channel* VorbisLayout(uint32_t aChannels);
 
 private:
   nsresult DecodeHeader(const unsigned char* aData, size_t aLength);
 
   void ProcessDecode(MediaRawData* aSample);
-  int DoDecode(MediaRawData* aSample);
+  MediaResult DoDecode(MediaRawData* aSample);
   void ProcessDrain();
 
   const AudioInfo& mInfo;
   const RefPtr<TaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
 
   // Vorbis decoder state
   vorbis_info mVorbisInfo;
--- a/dom/media/platforms/agnostic/WAVDecoder.cpp
+++ b/dom/media/platforms/agnostic/WAVDecoder.cpp
@@ -60,37 +60,37 @@ RefPtr<MediaDataDecoder::InitPromise>
 WaveDataDecoder::Init()
 {
   return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
 }
 
 void
 WaveDataDecoder::Input(MediaRawData* aSample)
 {
-  if (!DoDecode(aSample)) {
-    mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
-                                      __func__));
+  MediaResult rv = DoDecode(aSample);
+  if (NS_FAILED(rv)) {
+    mCallback->Error(rv);
   } else {
     mCallback->InputExhausted();
   }
 }
 
-bool
+MediaResult
 WaveDataDecoder::DoDecode(MediaRawData* aSample)
 {
   size_t aLength = aSample->Size();
   ByteReader aReader(aSample->Data(), aLength);
   int64_t aOffset = aSample->mOffset;
   uint64_t aTstampUsecs = aSample->mTime;
 
   int32_t frames = aLength * 8 / mInfo.mBitDepth / mInfo.mChannels;
 
   AlignedAudioBuffer buffer(frames * mInfo.mChannels);
   if (!buffer) {
-    return false;
+    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
   }
   for (int i = 0; i < frames; ++i) {
     for (unsigned int j = 0; j < mInfo.mChannels; ++j) {
       if (mInfo.mProfile == 6) {                              //ALAW Data
         uint8_t v = aReader.ReadU8();
         int16_t decoded = DecodeALawSample(v);
         buffer[i * mInfo.mChannels + j] =
             IntegerToAudioSample<AudioDataValue>(decoded);
@@ -122,17 +122,17 @@ WaveDataDecoder::DoDecode(MediaRawData* 
   mCallback->Output(new AudioData(aOffset,
                                   aTstampUsecs,
                                   duration,
                                   frames,
                                   Move(buffer),
                                   mInfo.mChannels,
                                   mInfo.mRate));
 
-  return true;
+  return NS_OK;
 }
 
 void
 WaveDataDecoder::Drain()
 {
   mCallback->DrainComplete();
 }
 
--- a/dom/media/platforms/agnostic/WAVDecoder.h
+++ b/dom/media/platforms/agnostic/WAVDecoder.h
@@ -26,16 +26,16 @@ public:
   void Drain() override;
   void Shutdown() override;
   const char* GetDescriptionName() const override
   {
     return "wave audio decoder";
   }
 
 private:
-  bool DoDecode(MediaRawData* aSample);
+  MediaResult DoDecode(MediaRawData* aSample);
 
   const AudioInfo& mInfo;
   MediaDataDecoderCallback* mCallback;
 };
 
 } // namespace mozilla
 #endif
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -415,17 +415,17 @@ TimingInfoFromSample(MediaRawData* aSamp
   timestamp.presentationTimeStamp =
     CMTimeMake(aSample->mTime, USECS_PER_S);
   timestamp.decodeTimeStamp =
     CMTimeMake(aSample->mTimecode, USECS_PER_S);
 
   return timestamp;
 }
 
-nsresult
+MediaResult
 AppleVTDecoder::DoDecode(MediaRawData* aSample)
 {
   AssertOnTaskQueueThread();
 
   // For some reason this gives me a double-free error with stagefright.
   AutoCFRelease<CMBlockBufferRef> block = nullptr;
   AutoCFRelease<CMSampleBufferRef> sample = nullptr;
   VTDecodeInfoFlags infoFlags;
@@ -442,24 +442,24 @@ AppleVTDecoder::DoDecode(MediaRawData* a
                                           NULL, // Block source.
                                           0,    // Data offset.
                                           aSample->Size(),
                                           false,
                                           block.receive());
   if (rv != noErr) {
     NS_ERROR("Couldn't create CMBlockBuffer");
     mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
-    return NS_ERROR_OUT_OF_MEMORY;
+    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
   }
   CMSampleTimingInfo timestamp = TimingInfoFromSample(aSample);
   rv = CMSampleBufferCreate(kCFAllocatorDefault, block, true, 0, 0, mFormat, 1, 1, &timestamp, 0, NULL, sample.receive());
   if (rv != noErr) {
     NS_ERROR("Couldn't create CMSampleBuffer");
     mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
-    return NS_ERROR_OUT_OF_MEMORY;
+    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
   }
 
   VTDecodeFrameFlags decodeFlags =
     kVTDecodeFrame_EnableAsynchronousDecompression;
   rv = VTDecompressionSessionDecodeFrame(mSession,
                                          sample,
                                          decodeFlags,
                                          CreateAppleFrameRef(aSample),
--- a/dom/media/platforms/apple/AppleVTDecoder.h
+++ b/dom/media/platforms/apple/AppleVTDecoder.h
@@ -91,17 +91,17 @@ private:
   const uint32_t mDisplayHeight;
 
   // Method to set up the decompression session.
   nsresult InitializeSession();
   nsresult WaitForAsynchronousFrames();
   CFDictionaryRef CreateDecoderSpecification();
   CFDictionaryRef CreateDecoderExtensions();
   // Method to pass a frame to VideoToolbox for decoding.
-  nsresult DoDecode(MediaRawData* aSample);
+  MediaResult DoDecode(MediaRawData* aSample);
 
   const RefPtr<TaskQueue> mTaskQueue;
   const uint32_t mMaxRefFrames;
   const RefPtr<layers::ImageContainer> mImageContainer;
   Atomic<bool> mIsShutDown;
   const bool mUseSoftwareImages;
 
   // Set on reader/decode thread calling Flush() to indicate that output is
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -112,94 +112,91 @@ CopyAndPackAudio(AVFrame* aFrame, uint32
         *tmp++ = AudioSampleToFloat(data[channel][frame]);
       }
     }
   }
 
   return audio;
 }
 
-FFmpegAudioDecoder<LIBAV_VER>::DecodeResult
+MediaResult
 FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
 {
   AVPacket packet;
   mLib->av_init_packet(&packet);
 
   packet.data = const_cast<uint8_t*>(aSample->Data());
   packet.size = aSample->Size();
 
   if (!PrepareFrame()) {
     NS_WARNING("FFmpeg audio decoder failed to allocate frame.");
-    return DecodeResult::FATAL_ERROR;
+    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
   }
 
   int64_t samplePosition = aSample->mOffset;
   media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);
-  bool didOutput = false;
 
   while (packet.size > 0) {
     int decoded;
     int bytesConsumed =
       mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);
 
     if (bytesConsumed < 0) {
       NS_WARNING("FFmpeg audio decoder error.");
-      return DecodeResult::DECODE_ERROR;
+      return NS_ERROR_DOM_MEDIA_DECODE_ERR;
     }
 
     if (mFrame->format != AV_SAMPLE_FMT_FLT &&
         mFrame->format != AV_SAMPLE_FMT_FLTP &&
         mFrame->format != AV_SAMPLE_FMT_S16 &&
         mFrame->format != AV_SAMPLE_FMT_S16P &&
         mFrame->format != AV_SAMPLE_FMT_S32 &&
         mFrame->format != AV_SAMPLE_FMT_S32P) {
       NS_WARNING("FFmpeg audio decoder outputs unsupported audio format.");
-      return DecodeResult::DECODE_ERROR;
+      return NS_ERROR_DOM_MEDIA_DECODE_ERR;
     }
 
     if (decoded) {
       uint32_t numChannels = mCodecContext->channels;
       AudioConfig::ChannelLayout layout(numChannels);
       if (!layout.IsValid()) {
-        return DecodeResult::FATAL_ERROR;
+        return NS_ERROR_DOM_MEDIA_FATAL_ERR;
       }
 
       uint32_t samplingRate = mCodecContext->sample_rate;
 
       AlignedAudioBuffer audio =
         CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
 
       media::TimeUnit duration =
         FramesToTimeUnit(mFrame->nb_samples, samplingRate);
       if (!audio || !duration.IsValid()) {
         NS_WARNING("Invalid count of accumulated audio samples");
-        return DecodeResult::DECODE_ERROR;
+        return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
       }
 
       RefPtr<AudioData> data = new AudioData(samplePosition,
                                              pts.ToMicroseconds(),
                                              duration.ToMicroseconds(),
                                              mFrame->nb_samples,
                                              Move(audio),
                                              numChannels,
                                              samplingRate);
       mCallback->Output(data);
-      didOutput = true;
       pts += duration;
       if (!pts.IsValid()) {
         NS_WARNING("Invalid count of accumulated audio samples");
-        return DecodeResult::DECODE_ERROR;
+        return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
       }
     }
     packet.data += bytesConsumed;
     packet.size -= bytesConsumed;
     samplePosition += bytesConsumed;
   }
-
-  return didOutput ? DecodeResult::DECODE_FRAME : DecodeResult::DECODE_NO_FRAME;
+  return NS_OK;
 }
 
 void
 FFmpegAudioDecoder<LIBAV_VER>::ProcessDrain()
 {
   ProcessFlush();
   mCallback->DrainComplete();
 }
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h
@@ -30,15 +30,15 @@ public:
   void InitCodecContext() override;
   static AVCodecID GetCodecId(const nsACString& aMimeType);
   const char* GetDescriptionName() const override
   {
     return "ffmpeg audio decoder";
   }
 
 private:
-  DecodeResult DoDecode(MediaRawData* aSample) override;
+  MediaResult DoDecode(MediaRawData* aSample) override;
   void ProcessDrain() override;
 };
 
 } // namespace mozilla
 
 #endif // __FFmpegAACDecoder_h__
--- a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
@@ -104,29 +104,21 @@ FFmpegDataDecoder<LIBAV_VER>::Shutdown()
 
 void
 FFmpegDataDecoder<LIBAV_VER>::ProcessDecode(MediaRawData* aSample)
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
   if (mIsFlushing) {
     return;
   }
-  switch (DoDecode(aSample)) {
-    case DecodeResult::DECODE_ERROR:
-      mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__));
-      break;
-    case DecodeResult::FATAL_ERROR:
-      mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__));
-      break;
-    case DecodeResult::DECODE_NO_FRAME:
-    case DecodeResult::DECODE_FRAME:
-      mCallback->InputExhausted();
-      break;
-    default:
-      break;
+  MediaResult rv = DoDecode(aSample);
+  if (NS_FAILED(rv)) {
+    mCallback->Error(rv);
+  } else {
+    mCallback->InputExhausted();
   }
 }
 
 void
 FFmpegDataDecoder<LIBAV_VER>::Input(MediaRawData* aSample)
 {
   mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
     this, &FFmpegDataDecoder::ProcessDecode, aSample));
--- a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.h
@@ -35,23 +35,16 @@ public:
   void Input(MediaRawData* aSample) override;
   void Flush() override;
   void Drain() override;
   void Shutdown() override;
 
   static AVCodec* FindAVCodec(FFmpegLibWrapper* aLib, AVCodecID aCodec);
 
 protected:
-  enum DecodeResult {
-    DECODE_FRAME,
-    DECODE_NO_FRAME,
-    DECODE_ERROR,
-    FATAL_ERROR
-  };
-
   // Flush and Drain operation, always run
   virtual void ProcessFlush();
   virtual void ProcessShutdown();
   virtual void InitCodecContext() {}
   AVFrame*        PrepareFrame();
   nsresult        InitDecoder();
 
   FFmpegLibWrapper* mLib;
@@ -59,17 +52,17 @@ protected:
 
   AVCodecContext* mCodecContext;
   AVFrame*        mFrame;
   RefPtr<MediaByteBuffer> mExtraData;
   AVCodecID mCodecID;
 
 private:
   void ProcessDecode(MediaRawData* aSample);
-  virtual DecodeResult DoDecode(MediaRawData* aSample) = 0;
+  virtual MediaResult DoDecode(MediaRawData* aSample) = 0;
   virtual void ProcessDrain() = 0;
 
   static StaticMutex sMonitor;
   const RefPtr<TaskQueue> mTaskQueue;
   // Set/cleared on reader thread calling Flush() to indicate that output is
   // not required and so input samples on mTaskQueue need not be processed.
   Atomic<bool> mIsFlushing;
 };
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -156,62 +156,68 @@ FFmpegVideoDecoder<LIBAV_VER>::InitCodec
   mCodecContext->get_format = ChoosePixelFormat;
 
   mCodecParser = mLib->av_parser_init(mCodecID);
   if (mCodecParser) {
     mCodecParser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
   }
 }
 
-FFmpegVideoDecoder<LIBAV_VER>::DecodeResult
+MediaResult
 FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
 {
+  bool gotFrame = false;
+  return DoDecode(aSample, &gotFrame);
+}
+
+MediaResult
+FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame)
+{
   uint8_t* inputData = const_cast<uint8_t*>(aSample->Data());
   size_t inputSize = aSample->Size();
 
 #if LIBAVCODEC_VERSION_MAJOR >= 54
   if (inputSize && mCodecParser && (mCodecID == AV_CODEC_ID_VP8
 #if LIBAVCODEC_VERSION_MAJOR >= 55
       || mCodecID == AV_CODEC_ID_VP9
 #endif
       )) {
-    bool gotFrame = false;
     while (inputSize) {
       uint8_t* data;
       int size;
       int len = mLib->av_parser_parse2(mCodecParser, mCodecContext, &data, &size,
                                        inputData, inputSize,
                                        aSample->mTime, aSample->mTimecode,
                                        aSample->mOffset);
       if (size_t(len) > inputSize) {
-        return DecodeResult::DECODE_ERROR;
+        return NS_ERROR_DOM_MEDIA_DECODE_ERR;
       }
       inputData += len;
       inputSize -= len;
       if (size) {
-        switch (DoDecode(aSample, data, size)) {
-          case DecodeResult::DECODE_ERROR:
-            return DecodeResult::DECODE_ERROR;
-          case DecodeResult::DECODE_FRAME:
-            gotFrame = true;
-            break;
-          default:
-            break;
+        bool gotFrame = false;
+        MediaResult rv = DoDecode(aSample, data, size, &gotFrame);
+        if (NS_FAILED(rv)) {
+          return rv;
+        }
+        if (gotFrame && aGotFrame) {
+          *aGotFrame = true;
         }
       }
     }
-    return gotFrame ? DecodeResult::DECODE_FRAME : DecodeResult::DECODE_NO_FRAME;
+    return NS_OK;
   }
 #endif
-  return DoDecode(aSample, inputData, inputSize);
+  return DoDecode(aSample, inputData, inputSize, aGotFrame);
 }
 
-FFmpegVideoDecoder<LIBAV_VER>::DecodeResult
+MediaResult
 FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
-                                        uint8_t* aData, int aSize)
+                                        uint8_t* aData, int aSize,
+                                        bool* aGotFrame)
 {
   AVPacket packet;
   mLib->av_init_packet(&packet);
 
   packet.data = aData;
   packet.size = aSize;
   packet.dts = mLastInputDts = aSample->mTimecode;
   packet.pts = aSample->mTime;
@@ -222,17 +228,17 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
   // (FFmpeg >= 1.0 provides av_frame_get_pkt_duration)
   // As such we instead use a map using the dts as key that we will retrieve
   // later.
   // The map will have a typical size of 16 entry.
   mDurationMap.Insert(aSample->mTimecode, aSample->mDuration);
 
   if (!PrepareFrame()) {
     NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
-    return DecodeResult::FATAL_ERROR;
+    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
   }
 
   // Required with old version of FFmpeg/LibAV
   mFrame->reordered_opaque = AV_NOPTS_VALUE;
 
   int decoded;
   int bytesConsumed =
     mLib->avcodec_decode_video2(mCodecContext, mFrame, &decoded, &packet);
@@ -240,90 +246,97 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
   FFMPEG_LOG("DoDecodeFrame:decode_video: rv=%d decoded=%d "
              "(Input: pts(%lld) dts(%lld) Output: pts(%lld) "
              "opaque(%lld) pkt_pts(%lld) pkt_dts(%lld))",
              bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts,
              mFrame->reordered_opaque, mFrame->pkt_pts, mFrame->pkt_dts);
 
   if (bytesConsumed < 0) {
     NS_WARNING("FFmpeg video decoder error.");
-    return DecodeResult::DECODE_ERROR;
+    return NS_ERROR_DOM_MEDIA_DECODE_ERR;
+  }
+
+  if (!decoded) {
+    if (aGotFrame) {
+      *aGotFrame = false;
+    }
+    return NS_OK;
   }
 
   // If we've decoded a frame then we need to output it
-  if (decoded) {
-    int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts);
-    // Retrieve duration from dts.
-    // We use the first entry found matching this dts (this is done to
-    // handle damaged file with multiple frames with the same dts)
+  int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts);
+  // Retrieve duration from dts.
+  // We use the first entry found matching this dts (this is done to
+  // handle damaged file with multiple frames with the same dts)
 
-    int64_t duration;
-    if (!mDurationMap.Find(mFrame->pkt_dts, duration)) {
-      NS_WARNING("Unable to retrieve duration from map");
-      duration = aSample->mDuration;
-      // dts are probably incorrectly reported ; so clear the map as we're
-      // unlikely to find them in the future anyway. This also guards
-      // against the map becoming extremely big.
-      mDurationMap.Clear();
-    }
-    FFMPEG_LOG("Got one frame output with pts=%lld dts=%lld duration=%lld opaque=%lld",
-               pts, mFrame->pkt_dts, duration, mCodecContext->reordered_opaque);
+  int64_t duration;
+  if (!mDurationMap.Find(mFrame->pkt_dts, duration)) {
+    NS_WARNING("Unable to retrieve duration from map");
+    duration = aSample->mDuration;
+    // dts are probably incorrectly reported ; so clear the map as we're
+    // unlikely to find them in the future anyway. This also guards
+    // against the map becoming extremely big.
+    mDurationMap.Clear();
+  }
+  FFMPEG_LOG("Got one frame output with pts=%lld dts=%lld duration=%lld opaque=%lld",
+              pts, mFrame->pkt_dts, duration, mCodecContext->reordered_opaque);
 
-    VideoData::YCbCrBuffer b;
-    b.mPlanes[0].mData = mFrame->data[0];
-    b.mPlanes[1].mData = mFrame->data[1];
-    b.mPlanes[2].mData = mFrame->data[2];
+  VideoData::YCbCrBuffer b;
+  b.mPlanes[0].mData = mFrame->data[0];
+  b.mPlanes[1].mData = mFrame->data[1];
+  b.mPlanes[2].mData = mFrame->data[2];
 
-    b.mPlanes[0].mStride = mFrame->linesize[0];
-    b.mPlanes[1].mStride = mFrame->linesize[1];
-    b.mPlanes[2].mStride = mFrame->linesize[2];
+  b.mPlanes[0].mStride = mFrame->linesize[0];
+  b.mPlanes[1].mStride = mFrame->linesize[1];
+  b.mPlanes[2].mStride = mFrame->linesize[2];
 
-    b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
-    b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;
-    b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;
+  b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
+  b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;
+  b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;
 
-    b.mPlanes[0].mWidth = mFrame->width;
-    b.mPlanes[0].mHeight = mFrame->height;
-    if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P) {
-      b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = mFrame->width;
-      b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = mFrame->height;
-    } else {
-      b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
-      b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
-    }
+  b.mPlanes[0].mWidth = mFrame->width;
+  b.mPlanes[0].mHeight = mFrame->height;
+  if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P) {
+    b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = mFrame->width;
+    b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = mFrame->height;
+  } else {
+    b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
+    b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
+  }
 
-    RefPtr<VideoData> v =
-      VideoData::CreateAndCopyData(mInfo,
-                                   mImageContainer,
-                                   aSample->mOffset,
-                                   pts,
-                                   duration,
-                                   b,
-                                   !!mFrame->key_frame,
-                                   -1,
-                                   mInfo.ScaledImageRect(mFrame->width,
-                                                         mFrame->height));
+  RefPtr<VideoData> v =
+    VideoData::CreateAndCopyData(mInfo,
+                                 mImageContainer,
+                                 aSample->mOffset,
+                                 pts,
+                                 duration,
+                                 b,
+                                 !!mFrame->key_frame,
+                                 -1,
+                                 mInfo.ScaledImageRect(mFrame->width,
+                                                       mFrame->height));
 
-    if (!v) {
-      NS_WARNING("image allocation error.");
-      return DecodeResult::FATAL_ERROR;
-    }
-    mCallback->Output(v);
-    return DecodeResult::DECODE_FRAME;
+  if (!v) {
+    NS_WARNING("image allocation error.");
+    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
   }
-  return DecodeResult::DECODE_NO_FRAME;
+  mCallback->Output(v);
+  if (aGotFrame) {
+    *aGotFrame = true;
+  }
+  return NS_OK;
 }
 
 void
 FFmpegVideoDecoder<LIBAV_VER>::ProcessDrain()
 {
   RefPtr<MediaRawData> empty(new MediaRawData());
   empty->mTimecode = mLastInputDts;
-  while (DoDecode(empty) == DecodeResult::DECODE_FRAME) {
-  }
+  bool gotFrame = false;
+  while (NS_SUCCEEDED(DoDecode(empty, &gotFrame)) && gotFrame);
   mCallback->DrainComplete();
 }
 
 void
 FFmpegVideoDecoder<LIBAV_VER>::ProcessFlush()
 {
   mPtsContext.Reset();
   mDurationMap.Clear();
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
@@ -41,18 +41,19 @@ public:
     return "ffvpx video decoder";
 #else
     return "ffmpeg video decoder";
 #endif
   }
   static AVCodecID GetCodecId(const nsACString& aMimeType);
 
 private:
-  DecodeResult DoDecode(MediaRawData* aSample) override;
-  DecodeResult DoDecode(MediaRawData* aSample, uint8_t* aData, int aSize);
+  MediaResult DoDecode(MediaRawData* aSample) override;
+  MediaResult DoDecode(MediaRawData* aSample, bool* aGotFrame);
+  MediaResult DoDecode(MediaRawData* aSample, uint8_t* aData, int aSize, bool* aGotFrame);
   void ProcessDrain() override;
   void ProcessFlush() override;
   void OutputDelayedFrames();
 
   /**
    * This method allocates a buffer for FFmpeg's decoder, wrapped in an Image.
    * Currently it only supports Planar YUV420, which appears to be the only
    * non-hardware accelerated image format that FFmpeg's H264 decoder is