Bug 1334508: Treat demuxing errors differently than EOS. r?kinetik draft
author: Jean-Yves Avenard <jyavenard@mozilla.com>
Sun, 19 Feb 2017 23:46:16 +0100
changeset 486782 25f571e80a406c20618df1768b1548455b157d83
parent 486781 59685e17cbc04e30cd43cb392d5b5d984d60ecc5
child 546314 01a44d8bf95769e7b2cfc1c6665321854a4af734
push id: 46057
push user: bmo:jyavenard@mozilla.com
push date: Sun, 19 Feb 2017 22:47:58 +0000
reviewers: kinetik
bugs: 1334508
milestone: 54.0a1
Bug 1334508: Treat demuxing errors differently than EOS. r?kinetik MozReview-Commit-ID: JZTjo8jxfCr
dom/media/webm/WebMDemuxer.cpp
dom/media/webm/WebMDemuxer.h
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -561,95 +561,104 @@ WebMDemuxer::GetTrackCrypto(TrackInfo::T
     // crypto.mMode is not used for WebMs
     crypto.mIVSize = WEBM_IV_SIZE;
     crypto.mKeyId = Move(initData);
   }
 
   return crypto;
 }
 
-bool
+nsresult
 WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType,
                            MediaRawDataQueue *aSamples)
 {
   if (mIsMediaSource) {
     // To ensure mLastWebMBlockOffset is properly up to date.
     EnsureUpToDateIndex();
   }
 
-  RefPtr<NesteggPacketHolder> holder(NextPacket(aType));
+  RefPtr<NesteggPacketHolder> holder;
+  nsresult rv = NextPacket(aType, holder);
 
-  if (!holder) {
-    return false;
+  if (NS_FAILED(rv)) {
+    return rv;
   }
 
   int r = 0;
   unsigned int count = 0;
   r = nestegg_packet_count(holder->Packet(), &count);
   if (r == -1) {
-    return false;
+    return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
   }
   int64_t tstamp = holder->Timestamp();
   int64_t duration = holder->Duration();
 
   // The end time of this frame is the start time of the next frame. Fetch
   // the timestamp of the next packet for this track.  If we've reached the
   // end of the resource, use the file's duration as the end time of this
   // video frame.
   int64_t next_tstamp = INT64_MIN;
   if (aType == TrackInfo::kAudioTrack) {
-    RefPtr<NesteggPacketHolder> next_holder(NextPacket(aType));
+    RefPtr<NesteggPacketHolder> next_holder;
+    rv = NextPacket(aType, next_holder);
+    if (NS_FAILED(rv) && rv != NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
+      return rv;
+    }
     if (next_holder) {
       next_tstamp = next_holder->Timestamp();
       PushAudioPacket(next_holder);
     } else if (duration >= 0) {
       next_tstamp = tstamp + duration;
     } else if (!mIsMediaSource
                || (mIsMediaSource && mLastAudioFrameTime.isSome())) {
       next_tstamp = tstamp;
       next_tstamp += tstamp - mLastAudioFrameTime.refOr(0);
     } else {
       PushAudioPacket(holder);
     }
     mLastAudioFrameTime = Some(tstamp);
   } else if (aType == TrackInfo::kVideoTrack) {
-    RefPtr<NesteggPacketHolder> next_holder(NextPacket(aType));
+    RefPtr<NesteggPacketHolder> next_holder;
+    rv = NextPacket(aType, next_holder);
+    if (NS_FAILED(rv) && rv != NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
+      return rv;
+    }
     if (next_holder) {
       next_tstamp = next_holder->Timestamp();
       PushVideoPacket(next_holder);
     } else if (duration >= 0) {
       next_tstamp = tstamp + duration;
     } else if (!mIsMediaSource
                || (mIsMediaSource && mLastVideoFrameTime.isSome())) {
       next_tstamp = tstamp;
       next_tstamp += tstamp - mLastVideoFrameTime.refOr(0);
     } else {
       PushVideoPacket(holder);
     }
     mLastVideoFrameTime = Some(tstamp);
   }
 
   if (mIsMediaSource && next_tstamp == INT64_MIN) {
-    return false;
+    return NS_ERROR_DOM_MEDIA_END_OF_STREAM;
   }
 
   int64_t discardPadding = 0;
   if (aType == TrackInfo::kAudioTrack) {
     (void) nestegg_packet_discard_padding(holder->Packet(), &discardPadding);
   }
 
   int packetEncryption = nestegg_packet_encryption(holder->Packet());
 
   for (uint32_t i = 0; i < count; ++i) {
     unsigned char* data;
     size_t length;
     r = nestegg_packet_data(holder->Packet(), i, &data, &length);
     if (r == -1) {
       WEBM_DEBUG("nestegg_packet_data failed r=%d", r);
-      return false;
+      return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
     }
     unsigned char* alphaData;
     size_t alphaLength = 0;
     // Check packets for alpha information if file has declared alpha frames
     // may be present.
     if (mInfo.mVideo.HasAlpha()) {
       r = nestegg_packet_additional_data(holder->Packet(),
                                          1,
@@ -701,23 +710,23 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
 
     WEBM_DEBUG("push sample tstamp: %" PRId64 " next_tstamp: %" PRId64 " length: %" PRIuSIZE " kf: %d",
                tstamp, next_tstamp, length, isKeyframe);
     RefPtr<MediaRawData> sample;
     if (mInfo.mVideo.HasAlpha() && alphaLength != 0) {
       sample = new MediaRawData(data, length, alphaData, alphaLength);
       if ((length && !sample->Data()) || (alphaLength && !sample->AlphaData())) {
         // OOM.
-        return false;
+        return NS_ERROR_OUT_OF_MEMORY;
       }
     } else {
       sample = new MediaRawData(data, length);
       if (length && !sample->Data()) {
         // OOM.
-        return false;
+        return NS_ERROR_OUT_OF_MEMORY;
       }
     }
     sample->mTimecode = tstamp;
     sample->mTime = tstamp;
     sample->mDuration = next_tstamp - tstamp;
     sample->mOffset = holder->Offset();
     sample->mKeyframe = isKeyframe;
     if (discardPadding && i == count - 1) {
@@ -826,79 +835,88 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
         }
       }
     }
     if (aType == TrackInfo::kVideoTrack) {
       sample->mTrackInfo = mSharedVideoTrackInfo;
     }
     aSamples->Push(sample);
   }
-  return true;
+  return NS_OK;
 }
 
-RefPtr<NesteggPacketHolder>
-WebMDemuxer::NextPacket(TrackInfo::TrackType aType)
+nsresult
+WebMDemuxer::NextPacket(TrackInfo::TrackType aType,
+                        RefPtr<NesteggPacketHolder>& aPacket)
 {
   bool isVideo = aType == TrackInfo::kVideoTrack;
 
   // Flag to indicate that we do need to playback these types of
   // packets.
   bool hasType = isVideo ? mHasVideo : mHasAudio;
 
   if (!hasType) {
-    return nullptr;
+    return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
   }
 
   // The packet queue for the type that we are interested in.
   WebMPacketQueue &packets = isVideo ? mVideoPackets : mAudioPackets;
 
   if (packets.GetSize() > 0) {
-    return packets.PopFront();
+    aPacket = packets.PopFront();
+    return NS_OK;
   }
 
   // Track we are interested in
   uint32_t ourTrack = isVideo ? mVideoTrack : mAudioTrack;
 
   do {
-    RefPtr<NesteggPacketHolder> holder = DemuxPacket(aType);
+    RefPtr<NesteggPacketHolder> holder;
+    nsresult rv = DemuxPacket(aType, holder);
+    if (NS_FAILED(rv)) {
+      return rv;
+    }
     if (!holder) {
-      return nullptr;
+      return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
     }
 
     if (ourTrack == holder->Track()) {
-      return holder;
+      aPacket = holder;
+      return NS_OK;
     }
   } while (true);
 }
 
-RefPtr<NesteggPacketHolder>
-WebMDemuxer::DemuxPacket(TrackInfo::TrackType aType)
+nsresult
+WebMDemuxer::DemuxPacket(TrackInfo::TrackType aType,
+                         RefPtr<NesteggPacketHolder>& aPacket)
 {
   nestegg_packet* packet;
   int r = nestegg_read_packet(Context(aType), &packet);
   if (r == 0) {
     nestegg_read_reset(Context(aType));
-    return nullptr;
+    return NS_ERROR_DOM_MEDIA_END_OF_STREAM;
   } else if (r < 0) {
-    return nullptr;
+    return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
   }
 
   unsigned int track = 0;
   r = nestegg_packet_track(packet, &track);
   if (r == -1) {
-    return nullptr;
+    return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
   }
 
   int64_t offset = Resource(aType).Tell();
   RefPtr<NesteggPacketHolder> holder = new NesteggPacketHolder();
   if (!holder->Init(packet, offset, track, false)) {
-    return nullptr;
+    return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
   }
 
-  return holder;
+  aPacket = holder;
+  return NS_OK;
 }
 
 void
 WebMDemuxer::PushAudioPacket(NesteggPacketHolder* aItem)
 {
   mAudioPackets.PushFront(aItem);
 }
 
@@ -1048,62 +1066,70 @@ RefPtr<WebMTrackDemuxer::SeekPromise>
 WebMTrackDemuxer::Seek(const media::TimeUnit& aTime)
 {
   // Seeks to aTime. Upon success, SeekPromise will be resolved with the
   // actual time seeked to. Typically the random access point time
 
   media::TimeUnit seekTime = aTime;
   mSamples.Reset();
   mParent->SeekInternal(mType, aTime);
-  mParent->GetNextPacket(mType, &mSamples);
+  nsresult rv = mParent->GetNextPacket(mType, &mSamples);
+  if (NS_FAILED(rv)) {
+    return SeekPromise::CreateAndReject(rv, __func__);
+  }
   mNeedKeyframe = true;
 
   // Check what time we actually seeked to.
   if (mSamples.GetSize() > 0) {
     const RefPtr<MediaRawData>& sample = mSamples.First();
     seekTime = media::TimeUnit::FromMicroseconds(sample->mTime);
   }
   SetNextKeyFrameTime();
 
   return SeekPromise::CreateAndResolve(seekTime, __func__);
 }
 
-RefPtr<MediaRawData>
-WebMTrackDemuxer::NextSample()
+nsresult
+WebMTrackDemuxer::NextSample(RefPtr<MediaRawData>& aData)
 {
-  while (mSamples.GetSize() < 1 && mParent->GetNextPacket(mType, &mSamples)) {
+  nsresult rv;
+  while (mSamples.GetSize() < 1 &&
+         NS_SUCCEEDED((rv = mParent->GetNextPacket(mType, &mSamples)))) {
   }
   if (mSamples.GetSize()) {
-    return mSamples.PopFront();
+    aData = mSamples.PopFront();
+    return NS_OK;
   }
-  return nullptr;
+  return rv;
 }
 
 RefPtr<WebMTrackDemuxer::SamplesPromise>
 WebMTrackDemuxer::GetSamples(int32_t aNumSamples)
 {
   RefPtr<SamplesHolder> samples = new SamplesHolder;
   MOZ_ASSERT(aNumSamples);
 
+  nsresult rv = NS_ERROR_DOM_MEDIA_END_OF_STREAM;
+
   while (aNumSamples) {
-    RefPtr<MediaRawData> sample(NextSample());
-    if (!sample) {
+    RefPtr<MediaRawData> sample;
+    rv = NextSample(sample);
+    if (NS_FAILED(rv)) {
       break;
     }
     if (mNeedKeyframe && !sample->mKeyframe) {
       continue;
     }
     mNeedKeyframe = false;
     samples->mSamples.AppendElement(sample);
     aNumSamples--;
   }
 
   if (samples->mSamples.IsEmpty()) {
-    return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
-                                           __func__);
+    return SamplesPromise::CreateAndReject(rv, __func__);
   } else {
     UpdateSamples(samples->mSamples);
     return SamplesPromise::CreateAndResolve(samples, __func__);
   }
 }
 
 void
 WebMTrackDemuxer::SetNextKeyFrameTime()
@@ -1128,17 +1154,18 @@ WebMTrackDemuxer::SetNextKeyFrameTime()
   }
   Maybe<int64_t> startTime;
   if (skipSamplesQueue.GetSize()) {
     const RefPtr<MediaRawData>& sample = skipSamplesQueue.First();
     startTime.emplace(sample->mTimecode);
   }
   // Demux and buffer frames until we find a keyframe.
   RefPtr<MediaRawData> sample;
-  while (!foundKeyframe && (sample = NextSample())) {
+  nsresult rv = NS_OK;
+  while (!foundKeyframe && NS_SUCCEEDED((rv = NextSample(sample)))) {
     if (sample->mKeyframe) {
       frameTime = sample->mTime;
       foundKeyframe = true;
     }
     int64_t sampleTimecode = sample->mTimecode;
     skipSamplesQueue.Push(sample.forget());
     if (!startTime) {
       startTime.emplace(sampleTimecode);
@@ -1214,29 +1241,32 @@ WebMTrackDemuxer::GetNextRandomAccessPoi
 
 RefPtr<WebMTrackDemuxer::SkipAccessPointPromise>
 WebMTrackDemuxer::SkipToNextRandomAccessPoint(
   const media::TimeUnit& aTimeThreshold)
 {
   uint32_t parsed = 0;
   bool found = false;
   RefPtr<MediaRawData> sample;
+  nsresult rv = NS_OK;
   int64_t sampleTime;
 
   WEBM_DEBUG("TimeThreshold: %f", aTimeThreshold.ToSeconds());
-  while (!found && (sample = NextSample())) {
+  while (!found && NS_SUCCEEDED((rv = NextSample(sample)))) {
     parsed++;
     sampleTime = sample->mTime;
     if (sample->mKeyframe && sampleTime >= aTimeThreshold.ToMicroseconds()) {
       found = true;
       mSamples.Reset();
       mSamples.PushFront(sample.forget());
     }
   }
-  SetNextKeyFrameTime();
+  if (NS_SUCCEEDED(rv)) {
+    SetNextKeyFrameTime();
+  }
   if (found) {
     WEBM_DEBUG("next sample: %f (parsed: %d)",
                media::TimeUnit::FromMicroseconds(sampleTime).ToSeconds(),
                parsed);
     return SkipAccessPointPromise::CreateAndResolve(parsed, __func__);
   } else {
     SkipFailureHolder failure(NS_ERROR_DOM_MEDIA_END_OF_STREAM, parsed);
     return SkipAccessPointPromise::CreateAndReject(Move(failure), __func__);
--- a/dom/media/webm/WebMDemuxer.h
+++ b/dom/media/webm/WebMDemuxer.h
@@ -119,17 +119,18 @@ public:
 
   bool IsSeekableOnlyInBufferedRanges() const override;
 
   UniquePtr<EncryptionInfo> GetCrypto() override;
 
   bool GetOffsetForTime(uint64_t aTime, int64_t* aOffset);
 
   // Demux next WebM packet and append samples to MediaRawDataQueue
-  bool GetNextPacket(TrackInfo::TrackType aType, MediaRawDataQueue *aSamples);
+  nsresult GetNextPacket(TrackInfo::TrackType aType,
+                         MediaRawDataQueue *aSamples);
 
   nsresult Reset(TrackInfo::TrackType aType);
 
   // Pushes a packet to the front of the audio packet queue.
   void PushAudioPacket(NesteggPacketHolder* aItem);
 
   // Pushes a packet to the front of the video packet queue.
   void PushVideoPacket(NesteggPacketHolder* aItem);
@@ -187,21 +188,23 @@ private:
   media::TimeIntervals GetBuffered();
   nsresult SeekInternal(TrackInfo::TrackType aType,
                         const media::TimeUnit& aTarget);
   CryptoTrack GetTrackCrypto(TrackInfo::TrackType aType, size_t aTrackNumber);
 
   // Read a packet from the nestegg file. Returns nullptr if all packets for
   // the particular track have been read. Pass TrackInfo::kVideoTrack or
   // TrackInfo::kVideoTrack to indicate the type of the packet we want to read.
-  RefPtr<NesteggPacketHolder> NextPacket(TrackInfo::TrackType aType);
+  nsresult NextPacket(TrackInfo::TrackType aType,
+                      RefPtr<NesteggPacketHolder>& aPacket);
 
   // Internal method that demuxes the next packet from the stream. The caller
   // is responsible for making sure it doesn't get lost.
-  RefPtr<NesteggPacketHolder> DemuxPacket(TrackInfo::TrackType aType);
+  nsresult DemuxPacket(TrackInfo::TrackType aType,
+                       RefPtr<NesteggPacketHolder>& aPacket);
 
   // libnestegg audio and video context for webm container.
   // Access on reader's thread only.
   NestEggContext mVideoContext;
   NestEggContext mAudioContext;
   MediaResourceIndex& Resource(TrackInfo::TrackType aType)
   {
     return aType == TrackInfo::kVideoTrack
@@ -289,17 +292,17 @@ public:
 
   void BreakCycles() override;
 
 private:
   friend class WebMDemuxer;
   ~WebMTrackDemuxer();
   void UpdateSamples(nsTArray<RefPtr<MediaRawData>>& aSamples);
   void SetNextKeyFrameTime();
-  RefPtr<MediaRawData> NextSample ();
+  nsresult NextSample(RefPtr<MediaRawData>& aData);
   RefPtr<WebMDemuxer> mParent;
   TrackInfo::TrackType mType;
   UniquePtr<TrackInfo> mInfo;
   Maybe<media::TimeUnit> mNextKeyframeTime;
   bool mNeedKeyframe;
 
   // Queued samples extracted by the demuxer, but not yet returned.
   MediaRawDataQueue mSamples;