Bug 1300703: [webm] Use default duration if known. r?kinetik
MozReview-Commit-ID: J18IdDGkL62
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -350,16 +350,23 @@ WebMDemuxer::ReadMetadata()
// ensures that our video frame creation code doesn't overflow.
nsIntSize displaySize(params.display_width, params.display_height);
nsIntSize frameSize(params.width, params.height);
if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
// Video track's frame sizes will overflow. Ignore the video track.
continue;
}
+ uint64_t defaultDuration;
+ r = nestegg_track_default_duration(context, track, &defaultDuration);
+ if (r >= 0) {
+ mVideoDefaultDuration =
+ Some(uint64_t(media::TimeUnit::FromNanoseconds(defaultDuration).ToMicroseconds()));
+ }
+
mVideoTrack = track;
mHasVideo = true;
mInfo.mVideo.mDisplay = displaySize;
mInfo.mVideo.mImage = frameSize;
mInfo.mVideo.SetImageRect(pictureRect);
switch (params.stereo_mode) {
@@ -390,16 +397,23 @@ WebMDemuxer::ReadMetadata()
}
} else if (type == NESTEGG_TRACK_AUDIO && !mHasAudio) {
nestegg_audio_params params;
    r = nestegg_track_audio_params(context, track, &params);
if (r == -1) {
return NS_ERROR_FAILURE;
}
+ uint64_t defaultDuration;
+ r = nestegg_track_default_duration(context, track, &defaultDuration);
+ if (r >= 0) {
+ mAudioDefaultDuration =
+ Some(uint64_t(media::TimeUnit::FromNanoseconds(defaultDuration).ToMicroseconds()));
+ }
+
mAudioTrack = track;
mHasAudio = true;
mAudioCodec = nestegg_track_codec_id(context, track);
if (mAudioCodec == NESTEGG_CODEC_VORBIS) {
mInfo.mAudio.mMimeType = "audio/webm; codecs=vorbis";
} else if (mAudioCodec == NESTEGG_CODEC_OPUS) {
mInfo.mAudio.mMimeType = "audio/webm; codecs=opus";
OpusDataDecoder::AppendCodecDelay(mInfo.mAudio.mCodecSpecificConfig,
@@ -582,32 +596,40 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
if (next_holder) {
next_tstamp = next_holder->Timestamp();
PushAudioPacket(next_holder);
} else if (duration >= 0) {
next_tstamp = tstamp + duration;
} else if (!mIsMediaSource ||
(mIsMediaSource && mLastAudioFrameTime.isSome())) {
next_tstamp = tstamp;
- next_tstamp += tstamp - mLastAudioFrameTime.refOr(0);
+ if (mAudioDefaultDuration.isSome()) {
+ next_tstamp += mAudioDefaultDuration.ref();
+ } else {
+ next_tstamp += tstamp - mLastAudioFrameTime.refOr(0);
+ }
} else {
PushAudioPacket(holder);
}
mLastAudioFrameTime = Some(tstamp);
} else if (aType == TrackInfo::kVideoTrack) {
RefPtr<NesteggPacketHolder> next_holder(NextPacket(aType));
if (next_holder) {
next_tstamp = next_holder->Timestamp();
PushVideoPacket(next_holder);
} else if (duration >= 0) {
next_tstamp = tstamp + duration;
} else if (!mIsMediaSource ||
(mIsMediaSource && mLastVideoFrameTime.isSome())) {
next_tstamp = tstamp;
- next_tstamp += tstamp - mLastVideoFrameTime.refOr(0);
+ if (mVideoDefaultDuration.isSome()) {
+ next_tstamp += mVideoDefaultDuration.ref();
+ } else {
+ next_tstamp += tstamp - mLastVideoFrameTime.refOr(0);
+ }
} else {
PushVideoPacket(holder);
}
mLastVideoFrameTime = Some(tstamp);
}
if (mIsMediaSource && next_tstamp == INT64_MIN) {
return false;
--- a/dom/media/webm/WebMDemuxer.h
+++ b/dom/media/webm/WebMDemuxer.h
@@ -215,16 +215,18 @@ private:
// Nanoseconds to discard after seeking.
uint64_t mSeekPreroll;
// Calculate the frame duration from the last decodeable frame using the
// previous frame's timestamp. In NS.
Maybe<int64_t> mLastAudioFrameTime;
Maybe<int64_t> mLastVideoFrameTime;
+ Maybe<uint64_t> mAudioDefaultDuration;
+ Maybe<uint64_t> mVideoDefaultDuration;
// Codec ID of audio track
int mAudioCodec;
// Codec ID of video track
int mVideoCodec;
// Booleans to indicate if we have audio and/or video data
bool mHasVideo;