--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -229,21 +229,22 @@ VideoData::UpdateDuration(const TimeUnit
mDuration = aDuration;
}
void
VideoData::UpdateTimestamp(int64_t aTimestamp)
{
MOZ_ASSERT(aTimestamp >= 0);
- int64_t updatedDuration = GetEndTime() - aTimestamp;
- MOZ_ASSERT(updatedDuration >= 0);
+ auto updatedDuration =
+ GetEndTime() - TimeUnit::FromMicroseconds(aTimestamp);
+ MOZ_ASSERT(!updatedDuration.IsNegative());
mTime = aTimestamp;
- mDuration = TimeUnit::FromMicroseconds(updatedDuration);
+ mDuration = updatedDuration;
}
/* static */
bool VideoData::SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
const VideoInfo& aInfo,
const YCbCrBuffer &aBuffer,
const IntRect& aPicture,
bool aCopyData)
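Note: the hunk above is the core pattern of this series. Raw int64_t microsecond arithmetic becomes typed TimeUnit arithmetic, sign checks become IsNegative(), and unit conversions move to the edges. For illustration, here is a minimal, self-contained model of just the operations this patch exercises; it is a sketch only, not the real mozilla::media::TimeUnit, which also guards against overflow with checked arithmetic:

    // Illustrative stand-in for mozilla::media::TimeUnit; models only the
    // operations used by this patch.
    #include <cassert>
    #include <cstdint>

    class TimeUnit {
    public:
      TimeUnit() = default;
      static TimeUnit FromMicroseconds(int64_t aUs) { return TimeUnit(aUs); }
      int64_t ToMicroseconds() const { return mUs; }
      bool IsNegative() const { return mUs < 0; }
      TimeUnit operator+(const TimeUnit& aOther) const { return TimeUnit(mUs + aOther.mUs); }
      TimeUnit operator-(const TimeUnit& aOther) const { return TimeUnit(mUs - aOther.mUs); }
      bool operator<(const TimeUnit& aOther) const { return mUs < aOther.mUs; }
      bool operator>(const TimeUnit& aOther) const { return mUs > aOther.mUs; }
      bool operator>=(const TimeUnit& aOther) const { return mUs >= aOther.mUs; }
    private:
      explicit TimeUnit(int64_t aUs) : mUs(aUs) {}
      int64_t mUs = 0;
    };

    int main() {
      // The UpdateTimestamp() math above: moving a frame's start time forward
      // shrinks its duration, and the result must never be negative.
      TimeUnit end = TimeUnit::FromMicroseconds(5000);
      TimeUnit newStart = TimeUnit::FromMicroseconds(3000);
      TimeUnit updatedDuration = end - newStart;
      assert(!updatedDuration.IsNegative());
      assert(updatedDuration.ToMicroseconds() == 2000);
      return 0;
    }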
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -318,17 +318,20 @@ public:
  // Duration of sample.
  media::TimeUnit mDuration;
  // Number of frames in the contained data.
const uint32_t mFrames;
bool mKeyframe;
- int64_t GetEndTime() const { return mTime + mDuration.ToMicroseconds(); }
+ media::TimeUnit GetEndTime() const
+ {
+ return media::TimeUnit::FromMicroseconds(mTime) + mDuration;
+ }
bool AdjustForStartTime(int64_t aStartTime)
{
mTime = mTime - aStartTime;
return mTime >= 0;
}
template <typename ReturnType>
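Note: after this hunk the end time is typed while mTime itself is still a raw microsecond count, so GetEndTime() is where the two representations meet. A sketch of the resulting contract, reusing the model TimeUnit from the previous note (SampleSketch is hypothetical, not a class in the tree):

    // mTime stays int64_t in this patch; callers needing a typed start must
    // wrap it explicitly, exactly as GetEndTime() does.
    struct SampleSketch {
      int64_t mTime = 0;   // start time in microseconds, still untyped
      TimeUnit mDuration;  // typed duration

      TimeUnit GetEndTime() const {
        return TimeUnit::FromMicroseconds(mTime) + mDuration;
      }
      bool AdjustForStartTime(int64_t aStartTime) {
        mTime -= aStartTime;
        return mTime >= 0;  // false: the sample predates the start time
      }
    };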
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -1400,36 +1400,39 @@ private:
return NS_OK;
}
nsresult DropVideoUpToSeekTarget(VideoData* aVideo)
{
MOZ_ASSERT(aVideo);
SLOG("DropVideoUpToSeekTarget() frame [%" PRId64 ", %" PRId64 "]",
- aVideo->mTime, aVideo->GetEndTime());
- const int64_t target = mSeekJob.mTarget->GetTime().ToMicroseconds();
+ aVideo->mTime, aVideo->GetEndTime().ToMicroseconds());
+ const auto target = mSeekJob.mTarget->GetTime();
// If the frame end time is less than the seek target, we won't want
// to display this frame after the seek, so discard it.
if (target >= aVideo->GetEndTime()) {
SLOG("DropVideoUpToSeekTarget() pop video frame [%" PRId64 ", %" PRId64 "] target=%" PRId64,
- aVideo->mTime, aVideo->GetEndTime(), target);
+ aVideo->mTime, aVideo->GetEndTime().ToMicroseconds(),
+ target.ToMicroseconds());
mFirstVideoFrameAfterSeek = aVideo;
} else {
- if (target >= aVideo->mTime && aVideo->GetEndTime() >= target) {
+ if (target.ToMicroseconds() >= aVideo->mTime &&
+ aVideo->GetEndTime() >= target) {
// The seek target lies inside this frame's time slice. Adjust the
// frame's start time to match the seek target.
- aVideo->UpdateTimestamp(target);
+ aVideo->UpdateTimestamp(target.ToMicroseconds());
}
mFirstVideoFrameAfterSeek = nullptr;
SLOG("DropVideoUpToSeekTarget() found video frame [%" PRId64 ", %" PRId64 "] "
"containing target=%" PRId64,
- aVideo->mTime, aVideo->GetEndTime(), target);
+ aVideo->mTime, aVideo->GetEndTime().ToMicroseconds(),
+ target.ToMicroseconds());
MOZ_ASSERT(VideoQueue().GetSize() == 0,
"Should be the 1st sample after seeking");
mMaster->PushVideo(aVideo);
mDoneVideoSeeking = true;
}
return NS_OK;
@@ -3168,19 +3171,19 @@ MediaDecoderStateMachine::RequestAudioDa
RefPtr<MediaDecoderStateMachine> self = this;
mReader->RequestAudioData()->Then(
OwnerThread(), __func__,
[this, self] (AudioData* aAudio) {
MOZ_ASSERT(aAudio);
mAudioDataRequest.Complete();
      // aAudio->GetEndTime() is not always monotonically increasing in
      // chained ogg.
mDecodedAudioEndTime = std::max(
- TimeUnit::FromMicroseconds(aAudio->GetEndTime()), mDecodedAudioEndTime);
+ aAudio->GetEndTime(), mDecodedAudioEndTime);
LOGV("OnAudioDecoded [%" PRId64 ",%" PRId64 "]", aAudio->mTime,
- aAudio->GetEndTime());
+ aAudio->GetEndTime().ToMicroseconds());
mStateObj->HandleAudioDecoded(aAudio);
},
[this, self] (const MediaResult& aError) {
LOGV("OnAudioNotDecoded aError=%" PRIu32, static_cast<uint32_t>(aError.Code()));
mAudioDataRequest.Complete();
switch (aError.Code()) {
case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
mStateObj->HandleWaitingForAudio();
@@ -3214,19 +3217,19 @@ MediaDecoderStateMachine::RequestVideoDa
RefPtr<MediaDecoderStateMachine> self = this;
mReader->RequestVideoData(aSkipToNextKeyframe, aCurrentTime)->Then(
OwnerThread(), __func__,
[this, self, videoDecodeStartTime] (VideoData* aVideo) {
MOZ_ASSERT(aVideo);
mVideoDataRequest.Complete();
// Handle abnormal or negative timestamps.
mDecodedVideoEndTime = std::max(
- mDecodedVideoEndTime, TimeUnit::FromMicroseconds(aVideo->GetEndTime()));
+ mDecodedVideoEndTime, aVideo->GetEndTime());
LOGV("OnVideoDecoded [%" PRId64 ",%" PRId64 "]", aVideo->mTime,
- aVideo->GetEndTime());
+ aVideo->GetEndTime().ToMicroseconds());
mStateObj->HandleVideoDecoded(aVideo, videoDecodeStartTime);
},
[this, self] (const MediaResult& aError) {
LOGV("OnVideoNotDecoded aError=%" PRIu32 , static_cast<uint32_t>(aError.Code()));
mVideoDataRequest.Complete();
switch (aError.Code()) {
case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
mStateObj->HandleWaitingForVideo();
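Note: the logging changes in this file are mechanical but easy to get wrong: PRId64 expects a plain int64_t, so every TimeUnit that reaches a LOGV/SLOG format string must be converted explicitly. A minimal example of the pattern, with printf standing in for the logging macros and the model TimeUnit from the first note:

    #include <cinttypes>
    #include <cstdio>

    void LogDecoded(int64_t aTime, const TimeUnit& aEndTime) {
      // Passing aEndTime itself to a varargs function would be undefined
      // behavior; hence the explicit ToMicroseconds().
      printf("OnAudioDecoded [%" PRId64 ",%" PRId64 "]\n",
             aTime, aEndTime.ToMicroseconds());
    }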
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -2006,19 +2006,18 @@ MediaFormatReader::HandleDemuxedSamples(
}
}
decoder.mInfo = info;
if (sample->mKeyframe) {
ScheduleUpdate(aTrack);
} else {
- TimeInterval time =
- TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
- TimeUnit::FromMicroseconds(sample->GetEndTime()));
+ auto time = TimeInterval(
+ TimeUnit::FromMicroseconds(sample->mTime), sample->GetEndTime());
InternalSeekTarget seekTarget =
decoder.mTimeThreshold.refOr(InternalSeekTarget(time, false));
LOG("Stream change occurred on a non-keyframe. Seeking to:%" PRId64,
sample->mTime);
InternalSeek(aTrack, seekTarget);
}
return;
}
@@ -2215,17 +2214,17 @@ MediaFormatReader::Update(TrackType aTra
if (decoder.HasPromise()) {
needOutput = true;
if (decoder.mOutput.Length()) {
RefPtr<MediaData> output = decoder.mOutput[0];
decoder.mOutput.RemoveElementAt(0);
decoder.mSizeOfQueue -= 1;
decoder.mLastSampleTime =
Some(TimeInterval(TimeUnit::FromMicroseconds(output->mTime),
- TimeUnit::FromMicroseconds(output->GetEndTime())));
+ output->GetEndTime()));
decoder.mNumSamplesOutputTotal++;
ReturnOutput(output, aTrack);
// We have a decoded sample ready to be returned.
if (aTrack == TrackType::kVideoTrack) {
uint64_t delta =
decoder.mNumSamplesOutputTotal - mLastReportedNumDecodedFrames;
a.mStats.mDecodedFrames = static_cast<uint32_t>(delta);
mLastReportedNumDecodedFrames = decoder.mNumSamplesOutputTotal;
@@ -2375,17 +2374,17 @@ MediaFormatReader::Update(TrackType aTra
}
void
MediaFormatReader::ReturnOutput(MediaData* aData, TrackType aTrack)
{
MOZ_ASSERT(GetDecoderData(aTrack).HasPromise());
MOZ_DIAGNOSTIC_ASSERT(aData->mType != MediaData::NULL_DATA);
LOG("Resolved data promise for %s [%" PRId64 ", %" PRId64 "]", TrackTypeToStr(aTrack),
- aData->mTime, aData->GetEndTime());
+ aData->mTime, aData->GetEndTime().ToMicroseconds());
if (aTrack == TrackInfo::kAudioTrack) {
AudioData* audioData = static_cast<AudioData*>(aData);
if (audioData->mChannels != mInfo.mAudio.mChannels
|| audioData->mRate != mInfo.mAudio.mRate) {
LOG("change of audio format (rate:%d->%d). "
"This is an unsupported configuration",
--- a/dom/media/MediaQueue.h
+++ b/dom/media/MediaQueue.h
@@ -42,17 +42,17 @@ public:
return nsDeque::GetSize();
}
inline void Push(T* aItem) {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
MOZ_ASSERT(!mEndOfStream);
MOZ_ASSERT(aItem);
NS_ADDREF(aItem);
- MOZ_ASSERT(aItem->GetEndTime() >= aItem->mTime);
+ MOZ_ASSERT(aItem->GetEndTime().ToMicroseconds() >= aItem->mTime);
nsDeque::Push(aItem);
mPushEvent.Notify(RefPtr<T>(aItem));
}
inline already_AddRefed<T> PopFront() {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
RefPtr<T> rv = dont_AddRef(static_cast<T*>(nsDeque::PopFront()));
if (rv) {
@@ -99,34 +99,34 @@ public:
// Returns the approximate number of microseconds of items in the queue.
int64_t Duration() {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
if (GetSize() == 0) {
return 0;
}
T* last = static_cast<T*>(nsDeque::Peek());
T* first = static_cast<T*>(nsDeque::PeekFront());
- return last->GetEndTime() - first->mTime;
+ return last->GetEndTime().ToMicroseconds() - first->mTime;
}
void LockedForEach(nsDequeFunctor& aFunctor) const {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
ForEach(aFunctor);
}
// Extracts elements from the queue into aResult, in order.
  // Elements whose end time is before aTime are ignored.
void GetElementsAfter(int64_t aTime, nsTArray<RefPtr<T>>* aResult) {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
if (GetSize() == 0)
return;
size_t i;
for (i = GetSize() - 1; i > 0; --i) {
T* v = static_cast<T*>(ObjectAt(i));
- if (v->GetEndTime() < aTime)
+ if (v->GetEndTime().ToMicroseconds() < aTime)
break;
}
    // Elements before index i have an end time before aTime. It's also
    // possible that the element at i has an end time before aTime, but
    // that's OK.
for (; i < GetSize(); ++i) {
RefPtr<T> elem = static_cast<T*>(ObjectAt(static_cast<size_t>(i)));
aResult->AppendElement(elem);
}
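Note: both MediaFormatReader above and TrackBuffersManager below build a TimeInterval from a sample's bounds, and after this patch only the start still needs wrapping. A condensed sketch of that recurring construction; the TimeInterval here is a minimal stand-in for media::TimeInterval, and SampleInterval is a hypothetical helper, not a function in the tree:

    // Minimal stand-in for media::TimeInterval.
    struct TimeInterval {
      TimeUnit mStart;
      TimeUnit mEnd;
    };

    // Hypothetical helper: typed end straight from GetEndTime(), raw start
    // wrapped at the last moment.
    template <typename Sample>
    TimeInterval SampleInterval(const Sample& aSample) {
      return { TimeUnit::FromMicroseconds(aSample.mTime),
               aSample.GetEndTime() };
    }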
--- a/dom/media/android/AndroidMediaReader.cpp
+++ b/dom/media/android/AndroidMediaReader.cpp
@@ -248,17 +248,17 @@ bool AndroidMediaReader::DecodeVideoFram
// timestamp of the previous frame. We can then return the previously
// decoded frame, and it will have a valid timestamp.
int64_t duration = v->mTime - mLastVideoFrame->mTime;
mLastVideoFrame->UpdateDuration(TimeUnit::FromMicroseconds(duration));
// We have the start time of the next frame, so we can push the previous
// frame into the queue, except if the end time is below the threshold,
// in which case it wouldn't be displayed anyway.
- if (mLastVideoFrame->GetEndTime() < aTimeThreshold.ToMicroseconds()) {
+ if (mLastVideoFrame->GetEndTime() < aTimeThreshold) {
mLastVideoFrame = nullptr;
continue;
}
// Buffer the current frame we just decoded.
mVideoQueue.Push(mLastVideoFrame);
mLastVideoFrame = v;
--- a/dom/media/mediasink/AudioSink.cpp
+++ b/dom/media/mediasink/AudioSink.cpp
@@ -444,17 +444,17 @@ AudioSink::NotifyAudioNeeded()
mErrored = true;
return;
}
RefPtr<AudioData> silence = CreateAudioFromBuffer(Move(silenceData), data);
PushProcessedAudio(silence);
}
}
- mLastEndTime = TimeUnit::FromMicroseconds(data->GetEndTime());
+ mLastEndTime = data->GetEndTime();
mFramesParsed += data->mFrames;
if (mConverter->InputConfig() != mConverter->OutputConfig()) {
    // We must ensure that the buffer contains exactly the right number of
    // frames, in case one of the audio producers over-allocated it.
AlignedAudioBuffer buffer(Move(data->mAudioData));
buffer.SetLength(size_t(data->mFrames) * data->mChannels);
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -461,17 +461,17 @@ SendStreamAudio(DecodedStreamData* aStre
// the exact same silences
CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten
+ TimeUnitToFrames(aStartTime, aRate);
CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);
if (!audioWrittenOffset.isValid() ||
!frameOffset.isValid() ||
      // ignore packets that we've already processed
- audio->GetEndTime() <= aStream->mNextAudioTime.ToMicroseconds()) {
+ audio->GetEndTime() <= aStream->mNextAudioTime) {
return;
}
if (audioWrittenOffset.value() + AUDIO_FUZZ_FRAMES < frameOffset.value()) {
int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
// Write silence to catch up
AudioSegment silence;
silence.InsertNullDataAtStart(silentFrames);
@@ -487,17 +487,17 @@ SendStreamAudio(DecodedStreamData* aStre
AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
AutoTArray<const AudioDataValue*, 2> channels;
for (uint32_t i = 0; i < audio->mChannels; ++i) {
channels.AppendElement(bufferData + i * audio->mFrames);
}
aOutput->AppendFrames(buffer.forget(), channels, audio->mFrames, aPrincipalHandle);
aStream->mAudioFramesWritten += audio->mFrames;
- aStream->mNextAudioTime = media::TimeUnit::FromMicroseconds(audio->GetEndTime());
+ aStream->mNextAudioTime = audio->GetEndTime();
}
void
DecodedStream::SendAudio(double aVolume, bool aIsSameOrigin,
const PrincipalHandle& aPrincipalHandle)
{
AssertOwnerThread();
@@ -608,23 +608,22 @@ DecodedStream::SendVideo(bool aIsSameOri
WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
FromMicroseconds(v->mTime),
mData->mNextVideoTime, mData->mLastVideoImageDisplaySize,
tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->mTime),
&output, aPrincipalHandle);
mData->mNextVideoTime = FromMicroseconds(v->mTime);
}
- if (mData->mNextVideoTime.ToMicroseconds() < v->GetEndTime()) {
- WriteVideoToMediaStream(sourceStream, v->mImage,
- FromMicroseconds(v->GetEndTime()),
+ if (mData->mNextVideoTime < v->GetEndTime()) {
+ WriteVideoToMediaStream(sourceStream, v->mImage, v->GetEndTime(),
mData->mNextVideoTime, v->mDisplay,
- tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->GetEndTime()),
+ tracksStartTimeStamp + v->GetEndTime().ToTimeDuration(),
&output, aPrincipalHandle);
- mData->mNextVideoTime = FromMicroseconds(v->GetEndTime());
+ mData->mNextVideoTime = v->GetEndTime();
mData->mLastVideoImage = v->mImage;
mData->mLastVideoImageDisplaySize = v->mDisplay;
}
}
// Check the output is not empty.
if (output.GetLastFrame()) {
mData->mEOSVideoCompensation = ZeroDurationAtLastChunk(output);
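Note: the DecodedStream hunk also swaps tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->GetEndTime()) for tracksStartTimeStamp + v->GetEndTime().ToTimeDuration(), converting between the two time types directly instead of through a raw microsecond count. Roughly, in terms of the model types (TimeDuration here is a crude stand-in for mozilla::TimeDuration, and the member function is modeled as a free function):

    // Crude stand-in for mozilla::TimeDuration.
    struct TimeDuration {
      double mMs = 0.0;
      static TimeDuration FromMicroseconds(double aUs) {
        return { aUs / 1000.0 };
      }
    };

    // Models TimeUnit::ToTimeDuration(): the conversion happens in one
    // well-defined place rather than ad hoc at every call site.
    TimeDuration ToTimeDuration(const TimeUnit& aUnit) {
      return TimeDuration::FromMicroseconds(
        static_cast<double>(aUnit.ToMicroseconds()));
    }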
--- a/dom/media/mediasink/VideoSink.cpp
+++ b/dom/media/mediasink/VideoSink.cpp
@@ -405,56 +405,59 @@ VideoSink::RenderVideoFrames(int32_t aMa
void
VideoSink::UpdateRenderedVideoFrames()
{
AssertOwnerThread();
MOZ_ASSERT(mAudioSink->IsPlaying(), "should be called while playing.");
// Get the current playback position.
TimeStamp nowTime;
- const int64_t clockTime = mAudioSink->GetPosition(&nowTime).ToMicroseconds();
- NS_ASSERTION(clockTime >= 0, "Should have positive clock time.");
+ const auto clockTime = mAudioSink->GetPosition(&nowTime);
+  MOZ_ASSERT(!clockTime.IsNegative(), "Should have non-negative clock time.");
// Skip frames up to the playback position.
- int64_t lastFrameEndTime = 0;
+ TimeUnit lastFrameEndTime;
while (VideoQueue().GetSize() > mMinVideoQueueSize &&
clockTime >= VideoQueue().PeekFront()->GetEndTime()) {
RefPtr<VideoData> frame = VideoQueue().PopFront();
lastFrameEndTime = frame->GetEndTime();
if (frame->IsSentToCompositor()) {
mFrameStats.NotifyPresentedFrame();
} else {
mFrameStats.NotifyDecodedFrames({ 0, 0, 1 });
VSINK_LOG_V("discarding video frame mTime=%" PRId64 " clock_time=%" PRId64,
- frame->mTime, clockTime);
+ frame->mTime, clockTime.ToMicroseconds());
}
}
// The presentation end time of the last video frame displayed is either
// the end time of the current frame, or if we dropped all frames in the
// queue, the end time of the last frame we removed from the queue.
RefPtr<VideoData> currentFrame = VideoQueue().PeekFront();
- mVideoFrameEndTime = std::max(mVideoFrameEndTime, TimeUnit::FromMicroseconds(
- currentFrame ? currentFrame->GetEndTime() : lastFrameEndTime));
+ mVideoFrameEndTime = std::max(mVideoFrameEndTime,
+ currentFrame ? currentFrame->GetEndTime() : lastFrameEndTime);
MaybeResolveEndPromise();
- RenderVideoFrames(mVideoQueueSendToCompositorSize, clockTime, nowTime);
+ RenderVideoFrames(
+ mVideoQueueSendToCompositorSize,
+ clockTime.ToMicroseconds(), nowTime);
// Get the timestamp of the next frame. Schedule the next update at
// the start time of the next frame. If we don't have a next frame,
// we will run render loops again upon incoming frames.
nsTArray<RefPtr<VideoData>> frames;
VideoQueue().GetFirstElements(2, &frames);
if (frames.Length() < 2) {
return;
}
int64_t nextFrameTime = frames[1]->mTime;
- int64_t delta = std::max<int64_t>((nextFrameTime - clockTime), MIN_UPDATE_INTERVAL_US);
+ int64_t delta = std::max(
+ nextFrameTime - clockTime.ToMicroseconds(), MIN_UPDATE_INTERVAL_US);
TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
delta / mAudioSink->GetPlaybackParams().mPlaybackRate);
RefPtr<VideoSink> self = this;
mUpdateScheduler.Ensure(target, [self] () {
self->UpdateRenderedVideoFramesByTimer();
}, [self] () {
self->UpdateRenderedVideoFramesByTimer();
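Note: one subtlety in the VideoSink hunk: once clockTime is a TimeUnit, the frame-dropping comparison (clockTime >= GetEndTime()) is fully typed, but the scheduling delta drops back to microseconds, and because both std::max operands are then plain int64_t, the old explicit std::max<int64_t> is no longer needed. Sketched with the model TimeUnit (the MIN_UPDATE_INTERVAL_US value below is an assumption; the real constant is defined in VideoSink.cpp):

    #include <algorithm>
    #include <cstdint>

    static const int64_t MIN_UPDATE_INTERVAL_US = 1000000 / 60;  // assumed value

    int64_t NextUpdateDelta(int64_t aNextFrameTimeUs, const TimeUnit& aClockTime) {
      // Both operands are int64_t, so template argument deduction suffices.
      return std::max(aNextFrameTimeUs - aClockTime.ToMicroseconds(),
                      MIN_UPDATE_INTERVAL_US);
    }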
--- a/dom/media/mediasource/TrackBuffersManager.cpp
+++ b/dom/media/mediasource/TrackBuffersManager.cpp
@@ -442,17 +442,17 @@ TrackBuffersManager::DoEvictData(const T
if (frame->mKeyframe) {
lastKeyFrameIndex = i;
toEvict -= partialEvict;
if (toEvict < 0) {
break;
}
partialEvict = 0;
}
- if (frame->GetEndTime() >= lowerLimit.ToMicroseconds()) {
+ if (frame->GetEndTime() >= lowerLimit) {
break;
}
partialEvict += frame->ComputedSizeOfIncludingThis();
}
const int64_t finalSize = mSizeSourceBuffer - aSizeToEvict;
if (lastKeyFrameIndex > 0) {
@@ -1416,23 +1416,23 @@ TrackBuffersManager::CheckSequenceDiscon
}
}
TimeInterval
TrackBuffersManager::PresentationInterval(const TrackBuffer& aSamples) const
{
TimeInterval presentationInterval =
TimeInterval(TimeUnit::FromMicroseconds(aSamples[0]->mTime),
- TimeUnit::FromMicroseconds(aSamples[0]->GetEndTime()));
+ aSamples[0]->GetEndTime());
for (uint32_t i = 1; i < aSamples.Length(); i++) {
auto& sample = aSamples[i];
presentationInterval = presentationInterval.Span(
TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
- TimeUnit::FromMicroseconds(sample->GetEndTime())));
+ sample->GetEndTime()));
}
return presentationInterval;
}
void
TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
{
if (!aSamples.Length()) {
@@ -1484,23 +1484,22 @@ TrackBuffersManager::ProcessFrames(Track
aTrackData.mLastParsedEndTime = TimeUnit();
}
for (auto& sample : aSamples) {
SAMPLE_DEBUG("Processing %s frame(pts:%" PRId64 " end:%" PRId64 ", dts:%" PRId64 ", duration:%" PRId64 ", "
"kf:%d)",
aTrackData.mInfo->mMimeType.get(),
sample->mTime,
- sample->GetEndTime(),
+ sample->GetEndTime().ToMicroseconds(),
sample->mTimecode,
sample->mDuration.ToMicroseconds(),
sample->mKeyframe);
- const TimeUnit sampleEndTime =
- TimeUnit::FromMicroseconds(sample->GetEndTime());
+ const TimeUnit sampleEndTime = sample->GetEndTime();
if (sampleEndTime > aTrackData.mLastParsedEndTime) {
aTrackData.mLastParsedEndTime = sampleEndTime;
}
      // We perform step 10 right away: if a keyframe is needed, we can't do
      // anything until we have one.
// 10. If the need random access point flag on track buffer equals true, then run the following steps:
@@ -1691,17 +1690,17 @@ TrackBuffersManager::CheckNextInsertionI
aTrackData.mNextInsertionIndex = Some(uint32_t(data.Length()));
return true;
}
// We now need to find the first frame of the searched interval.
// We will insert our new frames right before.
for (uint32_t i = 0; i < data.Length(); i++) {
const RefPtr<MediaRawData>& sample = data[i];
if (sample->mTime >= target.mStart.ToMicroseconds() ||
- sample->GetEndTime() > target.mStart.ToMicroseconds()) {
+ sample->GetEndTime() > target.mStart) {
aTrackData.mNextInsertionIndex = Some(i);
return true;
}
}
NS_ASSERTION(false, "Insertion Index Not Found");
return false;
}
@@ -1834,17 +1833,17 @@ TrackBuffersManager::RemoveFrames(const
// If highest end timestamp for track buffer is set and less than or equal to presentation timestamp:
// Remove all coded frames from track buffer that have a presentation timestamp greater than or equal to highest end timestamp and less than frame end timestamp"
TimeUnit intervalsEnd = aIntervals.GetEnd();
bool mayBreakLoop = false;
for (uint32_t i = aStartIndex; i < data.Length(); i++) {
const RefPtr<MediaRawData> sample = data[i];
TimeInterval sampleInterval =
TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
- TimeUnit::FromMicroseconds(sample->GetEndTime()));
+ sample->GetEndTime());
if (aIntervals.Contains(sampleInterval)) {
if (firstRemovedIndex.isNothing()) {
firstRemovedIndex = Some(i);
}
lastRemovedIndex = i;
mayBreakLoop = false;
continue;
}
@@ -1872,17 +1871,17 @@ TrackBuffersManager::RemoveFrames(const
TimeUnit maxSampleDuration;
uint32_t sizeRemoved = 0;
TimeIntervals removedIntervals;
for (uint32_t i = firstRemovedIndex.ref(); i <= lastRemovedIndex; i++) {
const RefPtr<MediaRawData> sample = data[i];
TimeInterval sampleInterval =
TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
- TimeUnit::FromMicroseconds(sample->GetEndTime()));
+ sample->GetEndTime());
removedIntervals += sampleInterval;
if (sample->mDuration > maxSampleDuration) {
maxSampleDuration = sample->mDuration;
}
sizeRemoved += sample->ComputedSizeOfIncludingThis();
}
aTrackData.mSizeBuffer -= sizeRemoved;
@@ -2113,17 +2112,17 @@ TrackBuffersManager::GetTrackBuffer(Trac
uint32_t TrackBuffersManager::FindSampleIndex(const TrackBuffer& aTrackBuffer,
const TimeInterval& aInterval)
{
TimeUnit target = aInterval.mStart - aInterval.mFuzz;
for (uint32_t i = 0; i < aTrackBuffer.Length(); i++) {
const RefPtr<MediaRawData>& sample = aTrackBuffer[i];
if (sample->mTime >= target.ToMicroseconds() ||
- sample->GetEndTime() > target.ToMicroseconds()) {
+ sample->GetEndTime() > target) {
return i;
}
}
NS_ASSERTION(false, "FindSampleIndex called with invalid arguments");
return 0;
}
@@ -2240,17 +2239,17 @@ TrackBuffersManager::SkipToNextRandomAcc
}
if (sample->mKeyframe &&
sample->mTime >= aTimeThreadshold.ToMicroseconds()) {
aFound = true;
break;
}
nextSampleTimecode =
TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration;
- nextSampleTime = TimeUnit::FromMicroseconds(sample->GetEndTime());
+ nextSampleTime = sample->GetEndTime();
parsed++;
}
// Adjust the next demux time and index so that the next call to
// SkipToNextRandomAccessPoint will not count again the parsed sample as
// skipped.
if (aFound) {
trackData.mNextSampleTimecode =
@@ -2356,18 +2355,17 @@ TrackBuffersManager::GetSample(TrackInfo
}
if (p->mKeyframe) {
UpdateEvictionIndex(trackData, trackData.mNextGetSampleIndex.ref());
}
trackData.mNextGetSampleIndex.ref()++;
// Estimate decode timestamp and timestamp of the next sample.
TimeUnit nextSampleTimecode =
TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration;
- TimeUnit nextSampleTime =
- TimeUnit::FromMicroseconds(sample->GetEndTime());
+ TimeUnit nextSampleTime = sample->GetEndTime();
const MediaRawData* nextSample =
GetSample(aTrack,
trackData.mNextGetSampleIndex.ref(),
nextSampleTimecode,
nextSampleTime,
aFuzz);
if (nextSample) {
// We have a valid next sample, can use exact values.
@@ -2414,18 +2412,17 @@ TrackBuffersManager::GetSample(TrackInfo
int32_t i = pos;
for (; !track[i]->mKeyframe; i--) {
}
UpdateEvictionIndex(trackData, i);
trackData.mNextGetSampleIndex = Some(uint32_t(pos)+1);
trackData.mNextSampleTimecode =
TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration;
- trackData.mNextSampleTime =
- TimeUnit::FromMicroseconds(sample->GetEndTime());
+ trackData.mNextSampleTime = sample->GetEndTime();
aResult = NS_OK;
return p.forget();
}
int32_t
TrackBuffersManager::FindCurrentPosition(TrackInfo::TrackType aTrack,
const TimeUnit& aFuzz) const
{
@@ -2468,17 +2465,17 @@ TrackBuffersManager::FindCurrentPosition
}
// We couldn't find our sample by decode timestamp. Attempt to find it using
// presentation timestamp. There will likely be small jerkiness.
for (uint32_t i = 0; i < track.Length(); i++) {
const RefPtr<MediaRawData>& sample = track[i];
TimeInterval sampleInterval{
TimeUnit::FromMicroseconds(sample->mTime),
- TimeUnit::FromMicroseconds(sample->GetEndTime()),
+ sample->GetEndTime(),
aFuzz};
if (sampleInterval.ContainsWithStrictEnd(trackData.mNextSampleTimecode)) {
return i;
}
}
// Still not found.
@@ -2511,17 +2508,17 @@ TrackBuffersManager::GetNextRandomAccess
if (!sample) {
break;
}
if (sample->mKeyframe) {
return TimeUnit::FromMicroseconds(sample->mTime);
}
nextSampleTimecode =
TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration;
- nextSampleTime = TimeUnit::FromMicroseconds(sample->GetEndTime());
+ nextSampleTime = sample->GetEndTime();
}
return TimeUnit::FromInfinity();
}
void
TrackBuffersManager::TrackData::AddSizeOfResources(MediaSourceDecoder::ResourceSizes* aSizes) const
{
for (const TrackBuffer& buffer : mBuffers) {
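Note: TrackBuffersManager still mixes representations when scanning for an insertion or sample index: the start comparison converts the typed target down to microseconds, while the end comparison is now fully typed. A condensed sketch of the predicate used in CheckNextInsertionIndex() and FindSampleIndex() above, with the model TimeUnit (StartsAtOrSpans is a hypothetical name):

    template <typename Sample>
    bool StartsAtOrSpans(const Sample& aSample, const TimeUnit& aTarget) {
      return aSample.mTime >= aTarget.ToMicroseconds() ||  // raw start
             aSample.GetEndTime() > aTarget;               // typed end
    }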
--- a/dom/media/ogg/OggDemuxer.cpp
+++ b/dom/media/ogg/OggDemuxer.cpp
@@ -1352,17 +1352,17 @@ OggTrackDemuxer::NextSample()
}
if (mType == TrackInfo::kAudioTrack) {
data->mTrackInfo = mParent->mSharedAudioTrackInfo;
}
if (eos) {
// We've encountered an end of bitstream packet; check for a chained
// bitstream following this one.
// This will also update mSharedAudioTrackInfo.
- mParent->ReadOggChain(TimeUnit::FromMicroseconds(data->GetEndTime()));
+ mParent->ReadOggChain(data->GetEndTime());
}
return data;
}
RefPtr<OggTrackDemuxer::SamplesPromise>
OggTrackDemuxer::GetSamples(int32_t aNumSamples)
{
RefPtr<SamplesHolder> samples = new SamplesHolder;