--- a/dom/media/ADTSDemuxer.cpp
+++ b/dom/media/ADTSDemuxer.cpp
@@ -294,16 +294,18 @@ InitAudioSpecificConfig(const Frame& fra
asc[0] = (audioObjectType & 0x1F) << 3 | (samplingFrequencyIndex & 0x0E) >> 1;
asc[1] = (samplingFrequencyIndex & 0x01) << 7 | (channelConfig & 0x0F) << 3;
aBuffer->AppendElements(asc, 2);
}
} // namespace adts
+using media::TimeUnit;
+
// ADTSDemuxer
ADTSDemuxer::ADTSDemuxer(MediaResource* aSource)
: mSource(aSource)
{
}
bool
@@ -379,29 +381,29 @@ ADTSTrackDemuxer::ADTSTrackDemuxer(Media
ADTSTrackDemuxer::~ADTSTrackDemuxer()
{
delete mParser;
}
bool
ADTSTrackDemuxer::Init()
{
- FastSeek(media::TimeUnit());
+ FastSeek(TimeUnit::Zero());
// Read the first frame to fetch sample rate and other meta data.
RefPtr<MediaRawData> frame(GetNextFrame(FindNextFrame(true)));
ADTSLOG("Init StreamLength()=%" PRId64 " first-frame-found=%d",
StreamLength(), !!frame);
if (!frame) {
return false;
}
// Rewind back to the stream begin to avoid dropping the first frame.
- FastSeek(media::TimeUnit());
+ FastSeek(TimeUnit::Zero());
if (!mInfo) {
mInfo = MakeUnique<AudioInfo>();
}
mInfo->mRate = mSamplesPerSecond;
mInfo->mChannels = mChannels;
mInfo->mBitDepth = 16;
@@ -432,28 +434,28 @@ ADTSTrackDemuxer::Init()
UniquePtr<TrackInfo>
ADTSTrackDemuxer::GetInfo() const
{
return mInfo->Clone();
}
RefPtr<ADTSTrackDemuxer::SeekPromise>
-ADTSTrackDemuxer::Seek(const media::TimeUnit& aTime)
+ADTSTrackDemuxer::Seek(const TimeUnit& aTime)
{
// Efficiently seek to the position.
FastSeek(aTime);
// Correct seek position by scanning the next frames.
- const media::TimeUnit seekTime = ScanUntil(aTime);
+ const TimeUnit seekTime = ScanUntil(aTime);
return SeekPromise::CreateAndResolve(seekTime, __func__);
}
-media::TimeUnit
-ADTSTrackDemuxer::FastSeek(const media::TimeUnit& aTime)
+TimeUnit
+ADTSTrackDemuxer::FastSeek(const TimeUnit& aTime)
{
ADTSLOG("FastSeek(%" PRId64 ") avgFrameLen=%f mNumParsedFrames=%" PRIu64
" mFrameIndex=%" PRId64 " mOffset=%" PRIu64,
aTime.ToMicroseconds(), AverageFrameLength(), mNumParsedFrames,
mFrameIndex, mOffset);
const int64_t firstFrameOffset = mParser->FirstFrame().Offset();
if (!aTime.ToMicroseconds()) {
@@ -475,18 +477,18 @@ ADTSTrackDemuxer::FastSeek(const media::
" mFrameIndex=%" PRId64 " mFirstFrameOffset=%" PRIu64 " mOffset=%" PRIu64
" SL=%" PRIu64 "",
AverageFrameLength(), mNumParsedFrames, mFrameIndex,
firstFrameOffset, mOffset, StreamLength());
return Duration(mFrameIndex);
}
-media::TimeUnit
-ADTSTrackDemuxer::ScanUntil(const media::TimeUnit& aTime)
+TimeUnit
+ADTSTrackDemuxer::ScanUntil(const TimeUnit& aTime)
{
ADTSLOG("ScanUntil(%" PRId64 ") avgFrameLen=%f mNumParsedFrames=%" PRIu64
" mFrameIndex=%" PRId64 " mOffset=%" PRIu64,
aTime.ToMicroseconds(), AverageFrameLength(), mNumParsedFrames,
mFrameIndex, mOffset);
if (!aTime.ToMicroseconds()) {
return FastSeek(aTime);
@@ -552,75 +554,74 @@ ADTSTrackDemuxer::GetSamples(int32_t aNu
void
ADTSTrackDemuxer::Reset()
{
ADTSLOG("Reset()");
MOZ_ASSERT(mParser);
if (mParser) {
mParser->Reset();
}
- FastSeek(media::TimeUnit());
+ FastSeek(TimeUnit::Zero());
}
RefPtr<ADTSTrackDemuxer::SkipAccessPointPromise>
-ADTSTrackDemuxer::SkipToNextRandomAccessPoint(
- const media::TimeUnit& aTimeThreshold)
+ADTSTrackDemuxer::SkipToNextRandomAccessPoint(const TimeUnit& aTimeThreshold)
{
// Will not be called for audio-only resources.
return SkipAccessPointPromise::CreateAndReject(
SkipFailureHolder(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, 0), __func__);
}
int64_t
ADTSTrackDemuxer::GetResourceOffset() const
{
return mOffset;
}
media::TimeIntervals
ADTSTrackDemuxer::GetBuffered()
{
- media::TimeUnit duration = Duration();
+ auto duration = Duration();
- if (duration <= media::TimeUnit()) {
+ if (!duration.IsPositive()) {
return media::TimeIntervals();
}
AutoPinned<MediaResource> stream(mSource.GetResource());
return GetEstimatedBufferedTimeRanges(stream, duration.ToMicroseconds());
}
int64_t
ADTSTrackDemuxer::StreamLength() const
{
return mSource.GetLength();
}
-media::TimeUnit
+TimeUnit
ADTSTrackDemuxer::Duration() const
{
if (!mNumParsedFrames) {
- return media::TimeUnit::FromMicroseconds(-1);
+ return TimeUnit::FromMicroseconds(-1);
}
const int64_t streamLen = StreamLength();
if (streamLen < 0) {
// Unknown length, we can't estimate duration.
- return media::TimeUnit::FromMicroseconds(-1);
+ return TimeUnit::FromMicroseconds(-1);
}
const int64_t firstFrameOffset = mParser->FirstFrame().Offset();
int64_t numFrames = (streamLen - firstFrameOffset) / AverageFrameLength();
return Duration(numFrames);
}
-media::TimeUnit
+TimeUnit
ADTSTrackDemuxer::Duration(int64_t aNumFrames) const
{
if (!mSamplesPerSecond) {
- return media::TimeUnit::FromMicroseconds(-1);
+ return TimeUnit::FromMicroseconds(-1);
}
return FramesToTimeUnit(aNumFrames * mSamplesPerFrame, mSamplesPerSecond);
}
const adts::Frame&
ADTSTrackDemuxer::FindNextFrame(bool findFirstFrame /*= false*/)
{
@@ -779,17 +780,17 @@ ADTSTrackDemuxer::FrameIndexFromOffset(i
(aOffset - mParser->FirstFrame().Offset()) / AverageFrameLength();
}
ADTSLOGV("FrameIndexFromOffset(%" PRId64 ") -> %" PRId64, aOffset, frameIndex);
return std::max<int64_t>(0, frameIndex);
}
int64_t
-ADTSTrackDemuxer::FrameIndexFromTime(const media::TimeUnit& aTime) const
+ADTSTrackDemuxer::FrameIndexFromTime(const TimeUnit& aTime) const
{
int64_t frameIndex = 0;
if (mSamplesPerSecond > 0 && mSamplesPerFrame > 0) {
frameIndex = aTime.ToSeconds() * mSamplesPerSecond / mSamplesPerFrame - 1;
}
ADTSLOGV("FrameIndexFromOffset(%fs) -> %" PRId64,
aTime.ToSeconds(), frameIndex);
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -1386,30 +1386,30 @@ MediaDecoder::GetSeekable()
if (mMediaSeekableOnlyInBufferedRanges) {
return GetBuffered();
} else if (!IsMediaSeekable()) {
return media::TimeIntervals();
} else if (!IsTransportSeekable()) {
return GetBuffered();
} else {
return media::TimeIntervals(
- media::TimeInterval(media::TimeUnit::FromMicroseconds(0),
+ media::TimeInterval(TimeUnit::Zero(),
IsInfinite()
- ? media::TimeUnit::FromInfinity()
- : media::TimeUnit::FromSeconds(GetDuration())));
+ ? TimeUnit::FromInfinity()
+ : TimeUnit::FromSeconds(GetDuration())));
}
}
void
MediaDecoder::SetFragmentEndTime(double aTime)
{
MOZ_ASSERT(NS_IsMainThread());
if (mDecoderStateMachine) {
mDecoderStateMachine->DispatchSetFragmentEndTime(
- media::TimeUnit::FromSeconds(aTime));
+ TimeUnit::FromSeconds(aTime));
}
}
void
MediaDecoder::Suspend()
{
MOZ_ASSERT(NS_IsMainThread());
if (mResource) {
@@ -1765,22 +1765,21 @@ MediaDecoder::RemoveMediaTracks()
}
MediaDecoderOwner::NextFrameStatus
MediaDecoder::NextFrameBufferedStatus()
{
MOZ_ASSERT(NS_IsMainThread());
// Next frame hasn't been decoded yet.
// Use the buffered range to consider if we have the next frame available.
- media::TimeUnit currentPosition =
- media::TimeUnit::FromMicroseconds(CurrentPosition());
+ TimeUnit currentPosition = TimeUnit::FromMicroseconds(CurrentPosition());
media::TimeInterval interval(
currentPosition,
currentPosition
- + media::TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
+ + TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
return GetBuffered().Contains(interval)
? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
: MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
}
nsCString
MediaDecoder::GetDebugInfo()
{
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -551,17 +551,17 @@ public:
bool IsHardwareAccelerated(nsACString& aFailureReason) const override
{
return mDecoder->IsHardwareAccelerated(aFailureReason);
}
const char* GetDescriptionName() const override
{
return mDecoder->GetDescriptionName();
}
- void SetSeekThreshold(const media::TimeUnit& aTime) override
+ void SetSeekThreshold(const TimeUnit& aTime) override
{
mDecoder->SetSeekThreshold(aTime);
}
bool SupportDecoderRecycling() const override
{
return mDecoder->SupportDecoderRecycling();
}
RefPtr<ShutdownPromise> Shutdown() override
@@ -876,17 +876,17 @@ public:
UniquePtr<TrackInfo> GetInfo() const override
{
if (!mInfo) {
return nullptr;
}
return mInfo->Clone();
}
- RefPtr<SeekPromise> Seek(const media::TimeUnit& aTime) override
+ RefPtr<SeekPromise> Seek(const TimeUnit& aTime) override
{
RefPtr<Wrapper> self = this;
return InvokeAsync(
mTaskQueue, __func__,
[self, aTime]() { return self->mTrackDemuxer->Seek(aTime); })
->Then(mTaskQueue, __func__,
[self]() { self->UpdateRandomAccessPoint(); },
[self]() { self->UpdateRandomAccessPoint(); });
@@ -922,17 +922,17 @@ public:
MutexAutoLock lock(mMutex);
if (NS_SUCCEEDED(mNextRandomAccessPointResult)) {
*aTime = mNextRandomAccessPoint;
}
return mNextRandomAccessPointResult;
}
RefPtr<SkipAccessPointPromise>
- SkipToNextRandomAccessPoint(const media::TimeUnit& aTimeThreshold) override
+ SkipToNextRandomAccessPoint(const TimeUnit& aTimeThreshold) override
{
RefPtr<Wrapper> self = this;
return InvokeAsync(
mTaskQueue, __func__,
[self, aTimeThreshold]() {
return self->mTrackDemuxer->SkipToNextRandomAccessPoint(
aTimeThreshold);
})
@@ -1419,18 +1419,18 @@ MediaFormatReader::OnDemuxerInitDone(con
mTags = Move(tags);
mInitDone = true;
// Try to get the start time.
// For MSE case, the start time of each track is assumed to be 0.
// For others, we must demux the first sample to know the start time for each
// track.
if (!mDemuxer->ShouldComputeStartTime()) {
- mAudio.mFirstDemuxedSampleTime.emplace(TimeUnit::FromMicroseconds(0));
- mVideo.mFirstDemuxedSampleTime.emplace(TimeUnit::FromMicroseconds(0));
+ mAudio.mFirstDemuxedSampleTime.emplace(TimeUnit::Zero());
+ mVideo.mFirstDemuxedSampleTime.emplace(TimeUnit::Zero());
} else {
if (HasAudio()) {
RequestDemuxSamples(TrackInfo::kAudioTrack);
}
if (HasVideo()) {
RequestDemuxSamples(TrackInfo::kVideoTrack);
}
@@ -1507,34 +1507,34 @@ MediaFormatReader::GetDecoderData(TrackT
if (aTrack == TrackInfo::kAudioTrack) {
return mAudio;
}
return mVideo;
}
bool
MediaFormatReader::ShouldSkip(bool aSkipToNextKeyframe,
- media::TimeUnit aTimeThreshold)
+ TimeUnit aTimeThreshold)
{
MOZ_ASSERT(HasVideo());
- media::TimeUnit nextKeyframe;
+ TimeUnit nextKeyframe;
nsresult rv = mVideo.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe);
if (NS_FAILED(rv)) {
return aSkipToNextKeyframe;
}
return (nextKeyframe < aTimeThreshold
|| (mVideo.mTimeThreshold
&& mVideo.mTimeThreshold.ref().EndTime() < aTimeThreshold))
&& nextKeyframe.ToMicroseconds() >= 0
&& !nextKeyframe.IsInfinite();
}
RefPtr<MediaDecoderReader::VideoDataPromise>
MediaFormatReader::RequestVideoData(bool aSkipToNextKeyframe,
- const media::TimeUnit& aTimeThreshold)
+ const TimeUnit& aTimeThreshold)
{
MOZ_ASSERT(OnTaskQueue());
MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(),
"No sample requests allowed while seeking");
MOZ_DIAGNOSTIC_ASSERT(!mVideo.HasPromise(), "No duplicate sample requests");
MOZ_DIAGNOSTIC_ASSERT(!mVideo.mSeekRequest.Exists()
|| mVideo.mTimeThreshold.isSome());
MOZ_DIAGNOSTIC_ASSERT(!IsSeeking(), "called mid-seek");
@@ -2047,17 +2047,17 @@ MediaFormatReader::InternalSeek(TrackTyp
auto& decoder = GetDecoderData(aTrack);
decoder.Flush();
decoder.ResetDemuxer();
decoder.mTimeThreshold = Some(aTarget);
RefPtr<MediaFormatReader> self = this;
decoder.mTrackDemuxer->Seek(decoder.mTimeThreshold.ref().Time())
->Then(OwnerThread(), __func__,
- [self, aTrack] (media::TimeUnit aTime) {
+ [self, aTrack] (TimeUnit aTime) {
auto& decoder = self->GetDecoderData(aTrack);
decoder.mSeekRequest.Complete();
MOZ_ASSERT(
decoder.mTimeThreshold,
"Seek promise must be disconnected when timethreshold is reset");
decoder.mTimeThreshold.ref().mHasSeeked = true;
self->SetVideoDecodeThreshold();
self->ScheduleUpdate(aTrack);
@@ -2181,17 +2181,17 @@ MediaFormatReader::Update(TrackType aTra
// Record number of frames decoded and parsed. Automatically update the
// stats counters using the AutoNotifyDecoded stack-based class.
AbstractMediaDecoder::AutoNotifyDecoded a(mDecoder);
// Drop any frames found prior our internal seek target.
while (decoder.mTimeThreshold && decoder.mOutput.Length()) {
RefPtr<MediaData>& output = decoder.mOutput[0];
InternalSeekTarget target = decoder.mTimeThreshold.ref();
- media::TimeUnit time = output->mTime;
+ auto time = output->mTime;
if (time >= target.Time()) {
// We have reached our internal seek target.
decoder.mTimeThreshold.reset();
// We might have dropped some keyframes.
mPreviousDecodedKeyframeTime_us = sNoPreviousDecodedKeyframe;
}
if (time < target.Time() || (target.mDropTarget && target.Contains(time))) {
LOGV("Internal Seeking: Dropping %s frame time:%f wanted:%f (kf:%d)",
@@ -2310,17 +2310,17 @@ MediaFormatReader::Update(TrackType aTra
if (!needsNewDecoder
&& ++decoder.mNumOfConsecutiveError > decoder.mMaxConsecutiveError) {
NotifyError(aTrack, decoder.mError.ref());
return;
}
decoder.mError.reset();
LOG("%s decoded error count %d", TrackTypeToStr(aTrack),
decoder.mNumOfConsecutiveError);
- media::TimeUnit nextKeyframe;
+ TimeUnit nextKeyframe;
if (aTrack == TrackType::kVideoTrack && !decoder.HasInternalSeekPending()
&& NS_SUCCEEDED(
decoder.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe))) {
if (needsNewDecoder) {
ShutdownDecoder(aTrack);
}
SkipVideoDemuxToNextKeyFrame(
decoder.mLastSampleTime.refOr(TimeInterval()).Length());
@@ -2501,31 +2501,31 @@ MediaFormatReader::Reset(TrackType aTrac
void
MediaFormatReader::DropDecodedSamples(TrackType aTrack)
{
MOZ_ASSERT(OnTaskQueue());
auto& decoder = GetDecoderData(aTrack);
size_t lengthDecodedQueue = decoder.mOutput.Length();
if (lengthDecodedQueue && decoder.mTimeThreshold.isSome()) {
- TimeUnit time = decoder.mOutput.LastElement()->mTime;
+ auto time = decoder.mOutput.LastElement()->mTime;
if (time >= decoder.mTimeThreshold.ref().Time()) {
// We would have reached our internal seek target.
decoder.mTimeThreshold.reset();
}
}
decoder.mOutput.Clear();
decoder.mSizeOfQueue -= lengthDecodedQueue;
if (aTrack == TrackInfo::kVideoTrack && mDecoder) {
mDecoder->NotifyDecodedFrames({ 0, 0, lengthDecodedQueue });
}
}
void
-MediaFormatReader::SkipVideoDemuxToNextKeyFrame(media::TimeUnit aTimeThreshold)
+MediaFormatReader::SkipVideoDemuxToNextKeyFrame(TimeUnit aTimeThreshold)
{
MOZ_ASSERT(OnTaskQueue());
LOG("Skipping up to %" PRId64, aTimeThreshold.ToMicroseconds());
// We've reached SkipVideoDemuxToNextKeyFrame when our decoding is late.
// As such we can drop all already decoded samples and discard all pending
// samples.
DropDecodedSamples(TrackInfo::kVideoTrack);
@@ -2706,17 +2706,17 @@ MediaFormatReader::OnSeekFailed(TrackTyp
&& mFallbackSeekTime.isSome()
&& mPendingSeekTime.ref() != mFallbackSeekTime.ref()) {
// We have failed to seek audio where video seeked to earlier.
// Attempt to seek instead to the closest point that we know we have in
// order to limit A/V sync discrepency.
// Ensure we have the most up to date buffered ranges.
UpdateReceivedNewData(TrackType::kAudioTrack);
- Maybe<media::TimeUnit> nextSeekTime;
+ Maybe<TimeUnit> nextSeekTime;
// Find closest buffered time found after video seeked time.
for (const auto& timeRange : mAudio.mTimeRanges) {
if (timeRange.mStart >= mPendingSeekTime.ref()) {
nextSeekTime.emplace(timeRange.mStart);
break;
}
}
if (nextSeekTime.isNothing()
@@ -2740,26 +2740,26 @@ MediaFormatReader::OnSeekFailed(TrackTyp
mSeekPromise.Reject(SeekRejectValue(type, aError), __func__);
}
void
MediaFormatReader::DoVideoSeek()
{
MOZ_ASSERT(mPendingSeekTime.isSome());
LOGV("Seeking video to %" PRId64, mPendingSeekTime.ref().ToMicroseconds());
- media::TimeUnit seekTime = mPendingSeekTime.ref();
+ auto seekTime = mPendingSeekTime.ref();
mVideo.mTrackDemuxer->Seek(seekTime)
->Then(OwnerThread(), __func__, this,
&MediaFormatReader::OnVideoSeekCompleted,
&MediaFormatReader::OnVideoSeekFailed)
->Track(mVideo.mSeekRequest);
}
void
-MediaFormatReader::OnVideoSeekCompleted(media::TimeUnit aTime)
+MediaFormatReader::OnVideoSeekCompleted(TimeUnit aTime)
{
MOZ_ASSERT(OnTaskQueue());
LOGV("Video seeked to %" PRId64, aTime.ToMicroseconds());
mVideo.mSeekRequest.Complete();
mPreviousDecodedKeyframeTime_us = sNoPreviousDecodedKeyframe;
SetVideoDecodeThreshold();
@@ -2823,26 +2823,26 @@ MediaFormatReader::SetVideoDecodeThresho
mVideo.mDecoder->SetSeekThreshold(threshold);
}
void
MediaFormatReader::DoAudioSeek()
{
MOZ_ASSERT(mPendingSeekTime.isSome());
LOGV("Seeking audio to %" PRId64, mPendingSeekTime.ref().ToMicroseconds());
- media::TimeUnit seekTime = mPendingSeekTime.ref();
+ auto seekTime = mPendingSeekTime.ref();
mAudio.mTrackDemuxer->Seek(seekTime)
->Then(OwnerThread(), __func__, this,
&MediaFormatReader::OnAudioSeekCompleted,
&MediaFormatReader::OnAudioSeekFailed)
->Track(mAudio.mSeekRequest);
}
void
-MediaFormatReader::OnAudioSeekCompleted(media::TimeUnit aTime)
+MediaFormatReader::OnAudioSeekCompleted(TimeUnit aTime)
{
MOZ_ASSERT(OnTaskQueue());
LOGV("Audio seeked to %" PRId64, aTime.ToMicroseconds());
mAudio.mSeekRequest.Complete();
mPendingSeekTime.reset();
mSeekPromise.Resolve(aTime, __func__);
}
@@ -2934,31 +2934,31 @@ MediaFormatReader::UpdateBuffered()
if (!mInitDone || !mHasStartTime) {
mBuffered = TimeIntervals();
return;
}
if (HasVideo()) {
mVideo.mTimeRanges = mVideo.mTrackDemuxer->GetBuffered();
bool hasLastEnd;
- media::TimeUnit lastEnd = mVideo.mTimeRanges.GetEnd(&hasLastEnd);
+ auto lastEnd = mVideo.mTimeRanges.GetEnd(&hasLastEnd);
if (hasLastEnd) {
if (mVideo.mLastTimeRangesEnd
&& mVideo.mLastTimeRangesEnd.ref() < lastEnd) {
// New data was added after our previous end, we can clear the EOS flag.
mVideo.mDemuxEOS = false;
ScheduleUpdate(TrackInfo::kVideoTrack);
}
mVideo.mLastTimeRangesEnd = Some(lastEnd);
}
}
if (HasAudio()) {
mAudio.mTimeRanges = mAudio.mTrackDemuxer->GetBuffered();
bool hasLastEnd;
- media::TimeUnit lastEnd = mAudio.mTimeRanges.GetEnd(&hasLastEnd);
+ auto lastEnd = mAudio.mTimeRanges.GetEnd(&hasLastEnd);
if (hasLastEnd) {
if (mAudio.mLastTimeRangesEnd
&& mAudio.mLastTimeRangesEnd.ref() < lastEnd) {
// New data was added after our previous end, we can clear the EOS flag.
mAudio.mDemuxEOS = false;
ScheduleUpdate(TrackInfo::kAudioTrack);
}
mAudio.mLastTimeRangesEnd = Some(lastEnd);
@@ -2970,22 +2970,22 @@ MediaFormatReader::UpdateBuffered()
intervals = media::Intersection(mVideo.mTimeRanges, mAudio.mTimeRanges);
} else if (HasAudio()) {
intervals = mAudio.mTimeRanges;
} else if (HasVideo()) {
intervals = mVideo.mTimeRanges;
}
if (!intervals.Length()
- || intervals.GetStart() == media::TimeUnit::FromMicroseconds(0)) {
+ || intervals.GetStart() == TimeUnit::Zero()) {
// IntervalSet already starts at 0 or is empty, nothing to shift.
mBuffered = intervals;
} else {
mBuffered =
- intervals.Shift(media::TimeUnit() - mInfo.mStartTime);
+ intervals.Shift(TimeUnit::Zero() - mInfo.mStartTime);
}
}
layers::ImageContainer*
MediaFormatReader::GetImageContainer()
{
return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer()
: nullptr;
--- a/dom/media/VideoUtils.cpp
+++ b/dom/media/VideoUtils.cpp
@@ -29,44 +29,45 @@
#include <stdint.h>
namespace mozilla {
NS_NAMED_LITERAL_CSTRING(kEMEKeySystemClearkey, "org.w3.clearkey");
NS_NAMED_LITERAL_CSTRING(kEMEKeySystemWidevine, "com.widevine.alpha");
using layers::PlanarYCbCrImage;
+using media::TimeUnit;
CheckedInt64 SaferMultDiv(int64_t aValue, uint32_t aMul, uint32_t aDiv) {
int64_t major = aValue / aDiv;
int64_t remainder = aValue % aDiv;
return CheckedInt64(remainder) * aMul / aDiv + CheckedInt64(major) * aMul;
}
// Converts from number of audio frames to microseconds, given the specified
// audio rate.
CheckedInt64 FramesToUsecs(int64_t aFrames, uint32_t aRate) {
return SaferMultDiv(aFrames, USECS_PER_S, aRate);
}
-media::TimeUnit FramesToTimeUnit(int64_t aFrames, uint32_t aRate) {
+TimeUnit FramesToTimeUnit(int64_t aFrames, uint32_t aRate) {
int64_t major = aFrames / aRate;
int64_t remainder = aFrames % aRate;
- return media::TimeUnit::FromMicroseconds(major) * USECS_PER_S +
- (media::TimeUnit::FromMicroseconds(remainder) * USECS_PER_S) / aRate;
+ return TimeUnit::FromMicroseconds(major) * USECS_PER_S +
+ (TimeUnit::FromMicroseconds(remainder) * USECS_PER_S) / aRate;
}
// Converts from microseconds to number of audio frames, given the specified
// audio rate.
CheckedInt64 UsecsToFrames(int64_t aUsecs, uint32_t aRate) {
return SaferMultDiv(aUsecs, aRate, USECS_PER_S);
}
// Format TimeUnit as number of frames at given rate.
-CheckedInt64 TimeUnitToFrames(const media::TimeUnit& aTime, uint32_t aRate) {
+CheckedInt64 TimeUnitToFrames(const TimeUnit& aTime, uint32_t aRate) {
return UsecsToFrames(aTime.ToMicroseconds(), aRate);
}
nsresult SecondsToUsecs(double aSeconds, int64_t& aOutUsecs) {
if (aSeconds * double(USECS_PER_S) > INT64_MAX) {
return NS_ERROR_FAILURE;
}
aOutUsecs = int64_t(aSeconds * double(USECS_PER_S));
@@ -106,18 +107,18 @@ media::TimeIntervals GetEstimatedBuffere
media::TimeIntervals buffered;
// Nothing to cache if the media takes 0us to play.
if (aDurationUsecs <= 0 || !aStream)
return buffered;
// Special case completely cached files. This also handles local files.
if (aStream->IsDataCachedToEndOfResource(0)) {
buffered +=
- media::TimeInterval(media::TimeUnit::FromMicroseconds(0),
- media::TimeUnit::FromMicroseconds(aDurationUsecs));
+ media::TimeInterval(TimeUnit::Zero(),
+ TimeUnit::FromMicroseconds(aDurationUsecs));
return buffered;
}
int64_t totalBytes = aStream->GetLength();
// If we can't determine the total size, pretend that we have nothing
// buffered. This will put us in a state of eternally-low-on-undecoded-data
// which is not great, but about the best we can do.
@@ -130,19 +131,18 @@ media::TimeIntervals GetEstimatedBuffere
// Bytes [startOffset..endOffset] are cached.
NS_ASSERTION(startOffset >= 0, "Integer underflow in GetBuffered");
NS_ASSERTION(endOffset >= 0, "Integer underflow in GetBuffered");
int64_t startUs = BytesToTime(startOffset, totalBytes, aDurationUsecs);
int64_t endUs = BytesToTime(endOffset, totalBytes, aDurationUsecs);
if (startUs != endUs) {
buffered +=
- media::TimeInterval(media::TimeUnit::FromMicroseconds(startUs),
-
- media::TimeUnit::FromMicroseconds(endUs));
+ media::TimeInterval(TimeUnit::FromMicroseconds(startUs),
+ TimeUnit::FromMicroseconds(endUs));
}
startOffset = aStream->GetNextCachedData(endOffset);
}
return buffered;
}
void DownmixStereoToMono(mozilla::AudioDataValue* aBuffer,
uint32_t aFrames)
--- a/dom/media/encoder/TrackEncoder.cpp
+++ b/dom/media/encoder/TrackEncoder.cpp
@@ -304,23 +304,23 @@ VideoTrackEncoder::AppendVideoSegment(co
mLastChunk = chunk;
chunk.mDuration = 0;
TRACK_LOG(LogLevel::Verbose,
("[VideoTrackEncoder]: Got first video chunk after %" PRId64 " ticks.",
nullDuration));
// Adapt to the time before the first frame. This extends the first frame
// from [start, end] to [0, end], but it'll do for now.
- CheckedInt64 diff = FramesToUsecs(nullDuration, mTrackRate);
- if (!diff.isValid()) {
+ auto diff = FramesToTimeUnit(nullDuration, mTrackRate);
+ if (!diff.IsValid()) {
NS_ERROR("null duration overflow");
return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
}
- mLastChunk.mTimeStamp -= TimeDuration::FromMicroseconds(diff.value());
+ mLastChunk.mTimeStamp -= diff.ToTimeDuration();
mLastChunk.mDuration += nullDuration;
}
MOZ_ASSERT(!mLastChunk.IsNull());
if (mLastChunk.CanCombineWithFollowing(chunk) || chunk.IsNull()) {
TRACK_LOG(LogLevel::Verbose,
("[VideoTrackEncoder]: Got dupe or null chunk."));
// This is the same frame as before (or null). We extend the last chunk
--- a/dom/media/fmp4/MP4Demuxer.cpp
+++ b/dom/media/fmp4/MP4Demuxer.cpp
@@ -567,18 +567,17 @@ MP4TrackDemuxer::Reset()
SetNextKeyFrameTime();
}
nsresult
MP4TrackDemuxer::GetNextRandomAccessPoint(media::TimeUnit* aTime)
{
if (mNextKeyframeTime.isNothing()) {
// There's no next key frame.
- *aTime =
- media::TimeUnit::FromMicroseconds(std::numeric_limits<int64_t>::max());
+ *aTime = media::TimeUnit::FromInfinity();
} else {
*aTime = mNextKeyframeTime.value();
}
return NS_OK;
}
RefPtr<MP4TrackDemuxer::SkipAccessPointPromise>
MP4TrackDemuxer::SkipToNextRandomAccessPoint(
--- a/dom/media/gtest/TestMP3Demuxer.cpp
+++ b/dom/media/gtest/TestMP3Demuxer.cpp
@@ -432,61 +432,64 @@ TEST_F(MP3DemuxerTest, Duration) {
if (target.mFileSize <= 0) {
continue;
}
target.mDemuxer->Reset();
RefPtr<MediaRawData> frameData(target.mDemuxer->DemuxSample());
ASSERT_TRUE(frameData);
- const int64_t duration = target.mDemuxer->Duration().ToMicroseconds();
- const int64_t pos = duration + 1e6;
+ const auto duration = target.mDemuxer->Duration();
+ const auto pos = duration + TimeUnit::FromMicroseconds(1e6);
// Attempt to seek 1 second past the end of stream.
- target.mDemuxer->Seek(TimeUnit::FromMicroseconds(pos));
+ target.mDemuxer->Seek(pos);
// The seek should bring us to the end of the stream.
- EXPECT_NEAR(duration, target.mDemuxer->SeekPosition().ToMicroseconds(),
- target.mSeekError * duration);
+ EXPECT_NEAR(duration.ToMicroseconds(),
+ target.mDemuxer->SeekPosition().ToMicroseconds(),
+ target.mSeekError * duration.ToMicroseconds());
// Since we're at the end of the stream, there should be no frames left.
frameData = target.mDemuxer->DemuxSample();
ASSERT_FALSE(frameData);
}
}
TEST_F(MP3DemuxerTest, Seek) {
for (const auto& target: mTargets) {
RefPtr<MediaRawData> frameData(target.mDemuxer->DemuxSample());
ASSERT_TRUE(frameData);
- const int64_t seekTime = TimeUnit::FromSeconds(1).ToMicroseconds();
- int64_t pos = target.mDemuxer->SeekPosition().ToMicroseconds();
+ const auto seekTime = TimeUnit::FromSeconds(1);
+ auto pos = target.mDemuxer->SeekPosition();
while (frameData) {
- EXPECT_NEAR(pos, target.mDemuxer->SeekPosition().ToMicroseconds(),
- target.mSeekError * pos);
+ EXPECT_NEAR(pos.ToMicroseconds(),
+ target.mDemuxer->SeekPosition().ToMicroseconds(),
+ target.mSeekError * pos.ToMicroseconds());
pos += seekTime;
- target.mDemuxer->Seek(TimeUnit::FromMicroseconds(pos));
+ target.mDemuxer->Seek(pos);
frameData = target.mDemuxer->DemuxSample();
}
}
// Seeking should work with in-between resets, too.
for (const auto& target: mTargets) {
target.mDemuxer->Reset();
RefPtr<MediaRawData> frameData(target.mDemuxer->DemuxSample());
ASSERT_TRUE(frameData);
- const int64_t seekTime = TimeUnit::FromSeconds(1).ToMicroseconds();
- int64_t pos = target.mDemuxer->SeekPosition().ToMicroseconds();
+ const auto seekTime = TimeUnit::FromSeconds(1);
+ auto pos = target.mDemuxer->SeekPosition();
while (frameData) {
- EXPECT_NEAR(pos, target.mDemuxer->SeekPosition().ToMicroseconds(),
- target.mSeekError * pos);
+ EXPECT_NEAR(pos.ToMicroseconds(),
+ target.mDemuxer->SeekPosition().ToMicroseconds(),
+ target.mSeekError * pos.ToMicroseconds());
pos += seekTime;
target.mDemuxer->Reset();
- target.mDemuxer->Seek(TimeUnit::FromMicroseconds(pos));
+ target.mDemuxer->Seek(pos);
frameData = target.mDemuxer->DemuxSample();
}
}
}
--- a/dom/media/gtest/TestMP4Demuxer.cpp
+++ b/dom/media/gtest/TestMP4Demuxer.cpp
@@ -11,16 +11,17 @@
#include "mozilla/SharedThreadPool.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/ArrayUtils.h"
#include "MockMediaResource.h"
#include "VideoUtils.h"
using namespace mozilla;
using namespace mp4_demuxer;
+using media::TimeUnit;
class AutoTaskQueue;
#define DO_FAIL [binding]()->void { EXPECT_TRUE(false); binding->mTaskQueue->BeginShutdown(); }
class MP4DemuxerBinding
{
public:
@@ -58,17 +59,17 @@ public:
RefPtr<GenericPromise>
CheckTrackKeyFrame(MediaTrackDemuxer* aTrackDemuxer)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
RefPtr<MediaTrackDemuxer> track = aTrackDemuxer;
RefPtr<MP4DemuxerBinding> binding = this;
- auto time = media::TimeUnit::Invalid();
+ auto time = TimeUnit::Invalid();
while (mIndex < mSamples.Length()) {
uint32_t i = mIndex++;
if (mSamples[i]->mKeyframe) {
time = mSamples[i]->mTime;
break;
}
}
@@ -410,24 +411,24 @@ TEST(MP4Demuxer, GetNextKeyframe)
binding->RunTestAndWait([binding] () {
// Insert a [0,end] buffered range, to simulate Moof's being buffered
// via MSE.
auto len = binding->resource->GetLength();
binding->resource->MockAddBufferedRange(0, len);
// gizmp-frag has two keyframes; one at dts=cts=0, and another at
// dts=cts=1000000. Verify we get expected results.
- media::TimeUnit time;
+ TimeUnit time;
binding->mVideoTrack = binding->mDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0);
binding->mVideoTrack->Reset();
binding->mVideoTrack->GetNextRandomAccessPoint(&time);
EXPECT_EQ(time.ToMicroseconds(), 0);
binding->mVideoTrack->GetSamples()->Then(binding->mTaskQueue, __func__,
[binding] () {
- media::TimeUnit time;
+ TimeUnit time;
binding->mVideoTrack->GetNextRandomAccessPoint(&time);
EXPECT_EQ(time.ToMicroseconds(), 1000000);
binding->mTaskQueue->BeginShutdown();
},
DO_FAIL
);
});
}
--- a/dom/media/ipc/VideoDecoderParent.cpp
+++ b/dom/media/ipc/VideoDecoderParent.cpp
@@ -15,16 +15,17 @@
#ifdef XP_WIN
#include "WMFDecoderModule.h"
#endif
namespace mozilla {
namespace dom {
using base::Thread;
+using media::TimeUnit;
using namespace ipc;
using namespace layers;
using namespace gfx;
class KnowsCompositorVideo : public layers::KnowsCompositor
{
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(KnowsCompositorVideo, override)
@@ -132,19 +133,19 @@ VideoDecoderParent::RecvInput(const Medi
RefPtr<MediaRawData> data = new MediaRawData(aData.buffer().get<uint8_t>(),
aData.buffer().Size<uint8_t>());
if (aData.buffer().Size<uint8_t>() && !data->Data()) {
// OOM
Error(NS_ERROR_OUT_OF_MEMORY);
return IPC_OK();
}
data->mOffset = aData.base().offset();
- data->mTime = media::TimeUnit::FromMicroseconds(aData.base().time());
- data->mTimecode = media::TimeUnit::FromMicroseconds(aData.base().timecode());
- data->mDuration = media::TimeUnit::FromMicroseconds(aData.base().duration());
+ data->mTime = TimeUnit::FromMicroseconds(aData.base().time());
+ data->mTimecode = TimeUnit::FromMicroseconds(aData.base().timecode());
+ data->mDuration = TimeUnit::FromMicroseconds(aData.base().duration());
data->mKeyframe = aData.base().keyframe();
DeallocShmem(aData.buffer());
RefPtr<VideoDecoderParent> self = this;
mDecoder->Decode(data)->Then(
mManagerTaskQueue, __func__,
[self, this](const MediaDataDecoder::DecodedData& aResults) {
@@ -252,17 +253,17 @@ VideoDecoderParent::RecvShutdown()
return IPC_OK();
}
mozilla::ipc::IPCResult
VideoDecoderParent::RecvSetSeekThreshold(const int64_t& aTime)
{
MOZ_ASSERT(!mDestroyed);
MOZ_ASSERT(OnManagerThread());
- mDecoder->SetSeekThreshold(media::TimeUnit::FromMicroseconds(aTime));
+ mDecoder->SetSeekThreshold(TimeUnit::FromMicroseconds(aTime));
return IPC_OK();
}
void
VideoDecoderParent::ActorDestroy(ActorDestroyReason aWhy)
{
MOZ_ASSERT(!mDestroyed);
MOZ_ASSERT(OnManagerThread());
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -17,22 +17,24 @@
#include "MediaStreamListener.h"
#include "OutputStreamManager.h"
#include "SharedBuffer.h"
#include "VideoSegment.h"
#include "VideoUtils.h"
namespace mozilla {
+using media::TimeUnit;
+
/*
* A container class to make it easier to pass the playback info all the
* way to DecodedStreamGraphListener from DecodedStream.
*/
struct PlaybackInfoInit {
- media::TimeUnit mStartTime;
+ TimeUnit mStartTime;
MediaInfo mInfo;
};
class DecodedStreamGraphListener : public MediaStreamListener {
public:
DecodedStreamGraphListener(MediaStream* aStream,
MozPromiseHolder<GenericPromise>&& aPromise,
AbstractThread* aMainThread)
@@ -139,18 +141,18 @@ public:
/* The following group of fields are protected by the decoder's monitor
* and can be read or written on any thread.
*/
// Count of audio frames written to the stream
int64_t mAudioFramesWritten;
// mNextVideoTime is the end timestamp for the last packet sent to the stream.
// Therefore video packets starting at or after this time need to be copied
// to the output stream.
- media::TimeUnit mNextVideoTime;
- media::TimeUnit mNextAudioTime;
+ TimeUnit mNextVideoTime;
+ TimeUnit mNextAudioTime;
// The last video image sent to the stream. Useful if we need to replicate
// the image.
RefPtr<layers::Image> mLastVideoImage;
gfx::IntSize mLastVideoImageDisplaySize;
bool mHaveSentFinish;
bool mHaveSentFinishAudio;
bool mHaveSentFinishVideo;
@@ -289,23 +291,23 @@ DecodedStream::OnEnded(TrackType aType)
return mFinishPromise;
} else if (aType == TrackInfo::kVideoTrack && mInfo.HasVideo()) {
return mFinishPromise;
}
return nullptr;
}
void
-DecodedStream::Start(const media::TimeUnit& aStartTime, const MediaInfo& aInfo)
+DecodedStream::Start(const TimeUnit& aStartTime, const MediaInfo& aInfo)
{
AssertOwnerThread();
MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");
mStartTime.emplace(aStartTime);
- mLastOutputTime = media::TimeUnit::Zero();
+ mLastOutputTime = TimeUnit::Zero();
mInfo = aInfo;
mPlaying = true;
ConnectListener();
class R : public Runnable {
typedef MozPromiseHolder<GenericPromise> Promise;
public:
R(PlaybackInfoInit&& aInit, Promise&& aPromise,
@@ -443,17 +445,17 @@ DecodedStream::SetPlaybackRate(double aP
void
DecodedStream::SetPreservesPitch(bool aPreservesPitch)
{
AssertOwnerThread();
mParams.mPreservesPitch = aPreservesPitch;
}
static void
-SendStreamAudio(DecodedStreamData* aStream, const media::TimeUnit& aStartTime,
+SendStreamAudio(DecodedStreamData* aStream, const TimeUnit& aStartTime,
AudioData* aData, AudioSegment* aOutput, uint32_t aRate,
const PrincipalHandle& aPrincipalHandle)
{
// The amount of audio frames that is used to fuzz rounding errors.
static const int64_t AUDIO_FUZZ_FRAMES = 1;
MOZ_ASSERT(aData);
AudioData* audio = aData;
@@ -536,18 +538,18 @@ DecodedStream::SendAudio(double aVolume,
sourceStream->EndTrack(audioTrackId);
mData->mHaveSentFinishAudio = true;
}
}
static void
WriteVideoToMediaStream(MediaStream* aStream,
layers::Image* aImage,
- const media::TimeUnit& aEnd,
- const media::TimeUnit& aStart,
+ const TimeUnit& aEnd,
+ const TimeUnit& aStart,
const mozilla::gfx::IntSize& aIntrinsicSize,
const TimeStamp& aTimeStamp,
VideoSegment* aOutput,
const PrincipalHandle& aPrincipalHandle)
{
RefPtr<layers::Image> image = aImage;
auto end = aStream->MicrosecondsToStreamTimeRoundDown(aEnd.ToMicroseconds());
auto start = aStream->MicrosecondsToStreamTimeRoundDown(aStart.ToMicroseconds());
@@ -706,33 +708,33 @@ DecodedStream::SendData()
(!mInfo.HasVideo() || mVideoQueue.IsFinished());
if (finished && !mData->mHaveSentFinish) {
mData->mHaveSentFinish = true;
mData->mStream->Finish();
}
}
-media::TimeUnit
+TimeUnit
DecodedStream::GetEndTime(TrackType aType) const
{
AssertOwnerThread();
if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
auto t = mStartTime.ref() + FramesToTimeUnit(
mData->mAudioFramesWritten, mInfo.mAudio.mRate);
if (t.IsValid()) {
return t;
}
} else if (aType == TrackInfo::kVideoTrack && mData) {
return mData->mNextVideoTime;
}
- return media::TimeUnit::Zero();
+ return TimeUnit::Zero();
}
-media::TimeUnit
+TimeUnit
DecodedStream::GetPosition(TimeStamp* aTimeStamp) const
{
AssertOwnerThread();
// This is only called after MDSM starts playback. So mStartTime is
// guaranteed to be something.
MOZ_ASSERT(mStartTime.isSome());
if (aTimeStamp) {
*aTimeStamp = TimeStamp::Now();
--- a/dom/media/mediasource/MediaSourceDecoder.cpp
+++ b/dom/media/mediasource/MediaSourceDecoder.cpp
@@ -100,22 +100,21 @@ MediaSourceDecoder::GetSeekable()
// time in union ranges and an end time equal to the highest end time in
// union ranges and abort these steps.
seekable +=
media::TimeInterval(unionRanges.GetStart(), unionRanges.GetEnd());
return seekable;
}
if (buffered.Length()) {
- seekable +=
- media::TimeInterval(media::TimeUnit::FromSeconds(0), buffered.GetEnd());
+ seekable += media::TimeInterval(TimeUnit::Zero(), buffered.GetEnd());
}
} else {
- seekable += media::TimeInterval(media::TimeUnit::FromSeconds(0),
- media::TimeUnit::FromSeconds(duration));
+ seekable += media::TimeInterval(TimeUnit::Zero(),
+ TimeUnit::FromSeconds(duration));
}
MSE_DEBUG("ranges=%s", DumpTimeRanges(seekable).get());
return seekable;
}
media::TimeIntervals
MediaSourceDecoder::GetBuffered()
{
@@ -125,32 +124,31 @@ MediaSourceDecoder::GetBuffered()
NS_WARNING("MediaSource element isn't attached");
return media::TimeIntervals::Invalid();
}
dom::SourceBufferList* sourceBuffers = mMediaSource->ActiveSourceBuffers();
if (!sourceBuffers) {
// Media source object is shutting down.
return TimeIntervals();
}
- media::TimeUnit highestEndTime;
+ TimeUnit highestEndTime;
nsTArray<media::TimeIntervals> activeRanges;
media::TimeIntervals buffered;
for (uint32_t i = 0; i < sourceBuffers->Length(); i++) {
bool found;
dom::SourceBuffer* sb = sourceBuffers->IndexedGetter(i, found);
MOZ_ASSERT(found);
activeRanges.AppendElement(sb->GetTimeIntervals());
highestEndTime =
std::max(highestEndTime, activeRanges.LastElement().GetEnd());
}
- buffered +=
- media::TimeInterval(media::TimeUnit::FromMicroseconds(0), highestEndTime);
+ buffered += media::TimeInterval(TimeUnit::Zero(), highestEndTime);
for (auto& range : activeRanges) {
if (mEnded && range.Length()) {
// Set the end time on the last range to highestEndTime by adding a
// new range spanning the current end time to highestEndTime, which
// Normalize() will then merge with the old last range.
range +=
media::TimeInterval(range.GetEnd(), highestEndTime);
@@ -284,17 +282,17 @@ MediaSourceDecoder::NextFrameBufferedSta
// Next frame hasn't been decoded yet.
// Use the buffered range to consider if we have the next frame available.
TimeUnit currentPosition = TimeUnit::FromMicroseconds(CurrentPosition());
TimeIntervals buffered = GetBuffered();
buffered.SetFuzz(MediaSourceDemuxer::EOS_FUZZ / 2);
TimeInterval interval(
currentPosition,
currentPosition
- + media::TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
+ + TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
return buffered.ContainsStrict(ClampIntervalToEnd(interval))
? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
: MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
}
bool
MediaSourceDecoder::CanPlayThrough()
{
--- a/dom/media/mediasource/MediaSourceDemuxer.cpp
+++ b/dom/media/mediasource/MediaSourceDemuxer.cpp
@@ -24,20 +24,17 @@ using media::TimeIntervals;
MediaSourceDemuxer::MediaSourceDemuxer(AbstractThread* aAbstractMainThread)
: mTaskQueue(new AutoTaskQueue(GetMediaThreadPool(MediaThreadType::PLAYBACK),
/* aSupportsTailDispatch = */ false))
, mMonitor("MediaSourceDemuxer")
{
MOZ_ASSERT(NS_IsMainThread());
}
-// Due to inaccuracies in determining buffer end
-// frames (Bug 1065207). This value is based on videos seen in the wild.
-const TimeUnit MediaSourceDemuxer::EOS_FUZZ =
- media::TimeUnit::FromMicroseconds(500000);
+constexpr TimeUnit MediaSourceDemuxer::EOS_FUZZ;
RefPtr<MediaSourceDemuxer::InitPromise>
MediaSourceDemuxer::Init()
{
RefPtr<MediaSourceDemuxer> self = this;
return InvokeAsync(GetTaskQueue(), __func__,
[self](){
if (self->ScanSourceBuffersForContent()) {
@@ -316,20 +313,20 @@ MediaSourceTrackDemuxer::MediaSourceTrac
UniquePtr<TrackInfo>
MediaSourceTrackDemuxer::GetInfo() const
{
return mParent->GetTrackInfo(mType)->Clone();
}
RefPtr<MediaSourceTrackDemuxer::SeekPromise>
-MediaSourceTrackDemuxer::Seek(const media::TimeUnit& aTime)
+MediaSourceTrackDemuxer::Seek(const TimeUnit& aTime)
{
MOZ_ASSERT(mParent, "Called after BreackCycle()");
- return InvokeAsync<media::TimeUnit&&>(
+ return InvokeAsync<TimeUnit&&>(
mParent->GetTaskQueue(), this, __func__,
&MediaSourceTrackDemuxer::DoSeek, aTime);
}
RefPtr<MediaSourceTrackDemuxer::SamplesPromise>
MediaSourceTrackDemuxer::GetSamples(int32_t aNumSamples)
{
MOZ_ASSERT(mParent, "Called after BreackCycle()");
@@ -341,39 +338,39 @@ void
MediaSourceTrackDemuxer::Reset()
{
MOZ_ASSERT(mParent, "Called after BreackCycle()");
RefPtr<MediaSourceTrackDemuxer> self = this;
nsCOMPtr<nsIRunnable> task =
NS_NewRunnableFunction([self] () {
self->mNextSample.reset();
self->mReset = true;
- self->mManager->Seek(self->mType, TimeUnit(), TimeUnit());
+ self->mManager->Seek(self->mType, TimeUnit::Zero(), TimeUnit::Zero());
{
MonitorAutoLock mon(self->mMonitor);
self->mNextRandomAccessPoint = self->mManager->GetNextRandomAccessPoint(
self->mType, MediaSourceDemuxer::EOS_FUZZ);
}
});
mParent->GetTaskQueue()->Dispatch(task.forget());
}
nsresult
-MediaSourceTrackDemuxer::GetNextRandomAccessPoint(media::TimeUnit* aTime)
+MediaSourceTrackDemuxer::GetNextRandomAccessPoint(TimeUnit* aTime)
{
MonitorAutoLock mon(mMonitor);
*aTime = mNextRandomAccessPoint;
return NS_OK;
}
RefPtr<MediaSourceTrackDemuxer::SkipAccessPointPromise>
MediaSourceTrackDemuxer::SkipToNextRandomAccessPoint(
- const media::TimeUnit& aTimeThreshold)
+ const TimeUnit& aTimeThreshold)
{
- return InvokeAsync<media::TimeUnit&&>(
+ return InvokeAsync<TimeUnit&&>(
mParent->GetTaskQueue(), this, __func__,
&MediaSourceTrackDemuxer::DoSkipToNextRandomAccessPoint,
aTimeThreshold);
}
media::TimeIntervals
MediaSourceTrackDemuxer::GetBuffered()
{
@@ -388,30 +385,29 @@ MediaSourceTrackDemuxer::BreakCycles()
NS_NewRunnableFunction([self]() {
self->mParent = nullptr;
self->mManager = nullptr;
} );
mParent->GetTaskQueue()->Dispatch(task.forget());
}
RefPtr<MediaSourceTrackDemuxer::SeekPromise>
-MediaSourceTrackDemuxer::DoSeek(const media::TimeUnit& aTime)
+MediaSourceTrackDemuxer::DoSeek(const TimeUnit& aTime)
{
TimeIntervals buffered = mManager->Buffered(mType);
// Fuzz factor represents a +/- threshold. So when seeking it allows the gap
// to be twice as big as the fuzz value. We only want to allow EOS_FUZZ gap.
buffered.SetFuzz(MediaSourceDemuxer::EOS_FUZZ / 2);
- TimeUnit seekTime = std::max(aTime - mPreRoll, TimeUnit::FromMicroseconds(0));
+ TimeUnit seekTime = std::max(aTime - mPreRoll, TimeUnit::Zero());
if (mManager->IsEnded() && seekTime >= buffered.GetEnd()) {
// We're attempting to seek past the end time. Cap seekTime so that we seek
// to the last sample instead.
seekTime =
- std::max(mManager->HighestStartTime(mType) - mPreRoll,
- TimeUnit::FromMicroseconds(0));
+ std::max(mManager->HighestStartTime(mType) - mPreRoll, TimeUnit::Zero());
}
if (!buffered.ContainsWithStrictEnd(seekTime)) {
if (!buffered.ContainsWithStrictEnd(aTime)) {
// We don't have the data to seek to.
return SeekPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA,
__func__);
}
// Theoretically we should reject the promise with WAITING_FOR_DATA,
@@ -422,17 +418,17 @@ MediaSourceTrackDemuxer::DoSeek(const me
TimeIntervals::IndexType index = buffered.Find(aTime);
MOZ_ASSERT(index != TimeIntervals::NoIndex);
seekTime = buffered[index].mStart;
}
seekTime = mManager->Seek(mType, seekTime, MediaSourceDemuxer::EOS_FUZZ);
MediaResult result = NS_OK;
RefPtr<MediaRawData> sample =
mManager->GetSample(mType,
- media::TimeUnit(),
+ TimeUnit::Zero(),
result);
MOZ_ASSERT(NS_SUCCEEDED(result) && sample);
mNextSample = Some(sample);
mReset = false;
{
MonitorAutoLock mon(mMonitor);
mNextRandomAccessPoint =
mManager->GetNextRandomAccessPoint(mType, MediaSourceDemuxer::EOS_FUZZ);
@@ -448,17 +444,17 @@ MediaSourceTrackDemuxer::DoGetSamples(in
// we are about to retrieve is still available.
TimeIntervals buffered = mManager->Buffered(mType);
buffered.SetFuzz(MediaSourceDemuxer::EOS_FUZZ / 2);
if (!buffered.Length() && mManager->IsEnded()) {
return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
__func__);
}
- if (!buffered.ContainsWithStrictEnd(TimeUnit::FromMicroseconds(0))) {
+ if (!buffered.ContainsWithStrictEnd(TimeUnit::Zero())) {
return SamplesPromise::CreateAndReject(
NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, __func__);
}
mReset = false;
}
RefPtr<MediaRawData> sample;
if (mNextSample) {
sample = mNextSample.ref();
@@ -484,17 +480,17 @@ MediaSourceTrackDemuxer::DoGetSamples(in
mNextRandomAccessPoint =
mManager->GetNextRandomAccessPoint(mType, MediaSourceDemuxer::EOS_FUZZ);
}
return SamplesPromise::CreateAndResolve(samples, __func__);
}
RefPtr<MediaSourceTrackDemuxer::SkipAccessPointPromise>
MediaSourceTrackDemuxer::DoSkipToNextRandomAccessPoint(
- const media::TimeUnit& aTimeThreadshold)
+ const TimeUnit& aTimeThreadshold)
{
uint32_t parsed = 0;
// Ensure that the data we are about to skip to is still available.
TimeIntervals buffered = mManager->Buffered(mType);
buffered.SetFuzz(MediaSourceDemuxer::EOS_FUZZ / 2);
if (buffered.ContainsWithStrictEnd(aTimeThreadshold)) {
bool found;
parsed = mManager->SkipToNextRandomAccessPoint(mType,
--- a/dom/media/mediasource/MediaSourceDemuxer.h
+++ b/dom/media/mediasource/MediaSourceDemuxer.h
@@ -53,17 +53,20 @@ public:
// Returns a string describing the state of the MediaSource internal
// buffered data. Used for debugging purposes.
void GetMozDebugReaderData(nsACString& aString);
void AddSizeOfResources(MediaSourceDecoder::ResourceSizes* aSizes);
// Gap allowed between frames.
- static const media::TimeUnit EOS_FUZZ;
+ // Due to inaccuracies in determining buffer end
+ // frames (Bug 1065207). This value is based on videos seen in the wild.
+ static constexpr media::TimeUnit EOS_FUZZ =
+ media::TimeUnit::FromMicroseconds(500000);
private:
~MediaSourceDemuxer();
friend class MediaSourceTrackDemuxer;
// Scan source buffers and update information.
bool ScanSourceBuffersForContent();
TrackBuffersManager* GetManager(TrackInfo::TrackType aType);
TrackInfo* GetTrackInfo(TrackInfo::TrackType);
--- a/dom/media/ogg/OggCodecState.cpp
+++ b/dom/media/ogg/OggCodecState.cpp
@@ -31,16 +31,18 @@
#endif
#endif
namespace mozilla {
extern LazyLogModule gMediaDecoderLog;
#define LOG(type, msg) MOZ_LOG(gMediaDecoderLog, type, msg)
+using media::TimeUnit;
+
/** Decoder base class for Ogg-encapsulated streams. */
OggCodecState*
OggCodecState::Create(ogg_page* aPage)
{
NS_ASSERTION(ogg_page_bos(aPage), "Only call on BOS page!");
nsAutoPtr<OggCodecState> codecState;
if (aPage->body_len > 6 && memcmp(aPage->body+1, "theora", 6) == 0) {
codecState = new TheoraState(aPage);
@@ -254,19 +256,19 @@ OggCodecState::PacketOutAsMediaRawData()
}
int64_t end_tstamp = Time(packet->granulepos);
NS_ASSERTION(end_tstamp >= 0, "timestamp invalid");
int64_t duration = PacketDuration(packet.get());
NS_ASSERTION(duration >= 0, "duration invalid");
- sample->mTimecode = media::TimeUnit::FromMicroseconds(packet->granulepos);
- sample->mTime = media::TimeUnit::FromMicroseconds(end_tstamp - duration);
- sample->mDuration = media::TimeUnit::FromMicroseconds(duration);
+ sample->mTimecode = TimeUnit::FromMicroseconds(packet->granulepos);
+ sample->mTime = TimeUnit::FromMicroseconds(end_tstamp - duration);
+ sample->mDuration = TimeUnit::FromMicroseconds(duration);
sample->mKeyframe = IsKeyframe(packet.get());
sample->mEOS = packet->e_o_s;
return sample.forget();
}
nsresult
OggCodecState::PageIn(ogg_page* aPage)
--- a/dom/media/ogg/OggDemuxer.cpp
+++ b/dom/media/ogg/OggDemuxer.cpp
@@ -183,17 +183,17 @@ bool
OggDemuxer::HaveStartTime(TrackInfo::TrackType aType)
{
return OggState(aType).mStartTime.isSome();
}
int64_t
OggDemuxer::StartTime(TrackInfo::TrackType aType)
{
- return OggState(aType).mStartTime.refOr(TimeUnit::FromMicroseconds(0)).ToMicroseconds();
+ return OggState(aType).mStartTime.refOr(TimeUnit::Zero()).ToMicroseconds();
}
RefPtr<OggDemuxer::InitPromise>
OggDemuxer::Init()
{
int ret = ogg_sync_init(OggSyncState(TrackInfo::kAudioTrack));
if (ret != 0) {
return InitPromise::CreateAndReject(NS_ERROR_OUT_OF_MEMORY, __func__);
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -24,16 +24,18 @@
#include "nsThreadUtils.h"
typedef mozilla::layers::Image Image;
typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;
namespace mozilla {
+using media::TimeUnit;
+
/**
* FFmpeg calls back to this function with a list of pixel formats it supports.
* We choose a pixel format that we support and return it.
* For now, we just look for YUV420P, YUVJ420P and YUV444 as those are the only
* only non-HW accelerated format supported by FFmpeg's H264 and VP9 decoder.
*/
static AVPixelFormat
ChoosePixelFormat(AVCodecContext* aCodecContext, const AVPixelFormat* aFormats)
@@ -337,17 +339,17 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
break;
}
}
RefPtr<VideoData> v =
VideoData::CreateAndCopyData(mInfo,
mImageContainer,
aSample->mOffset,
pts,
- media::TimeUnit::FromMicroseconds(duration),
+ TimeUnit::FromMicroseconds(duration),
b,
!!mFrame->key_frame,
-1,
mInfo.ScaledImageRect(mFrame->width,
mFrame->height));
if (!v) {
return MediaResult(NS_ERROR_OUT_OF_MEMORY,
@@ -359,17 +361,17 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
}
return NS_OK;
}
RefPtr<MediaDataDecoder::DecodePromise>
FFmpegVideoDecoder<LIBAV_VER>::ProcessDrain()
{
RefPtr<MediaRawData> empty(new MediaRawData());
- empty->mTimecode = media::TimeUnit::FromMicroseconds(mLastInputDts);
+ empty->mTimecode = TimeUnit::FromMicroseconds(mLastInputDts);
bool gotFrame = false;
DecodedData results;
while (NS_SUCCEEDED(DoDecode(empty, &gotFrame, results)) && gotFrame) {
}
return DecodePromise::CreateAndResolve(Move(results), __func__);
}
RefPtr<MediaDataDecoder::FlushPromise>
--- a/dom/media/platforms/wmf/WMFUtils.cpp
+++ b/dom/media/platforms/wmf/WMFUtils.cpp
@@ -19,16 +19,18 @@
#ifdef WMF_MUST_DEFINE_AAC_MFT_CLSID
// Some SDK versions don't define the AAC decoder CLSID.
// {32D186A7-218F-4C75-8876-DD77273A8999}
DEFINE_GUID(CLSID_CMSAACDecMFT, 0x32D186A7, 0x218F, 0x4C75, 0x88, 0x76, 0xDD, 0x77, 0x27, 0x3A, 0x89, 0x99);
#endif
namespace mozilla {
+using media::TimeUnit;
+
HRESULT
HNsToFrames(int64_t aHNs, uint32_t aRate, int64_t* aOutFrames)
{
MOZ_ASSERT(aOutFrames);
const int64_t HNS_PER_S = USECS_PER_S * 10;
CheckedInt<int64_t> i = aHNs;
i *= aRate;
i /= HNS_PER_S;
@@ -60,33 +62,33 @@ GetDefaultStride(IMFMediaType *aType, ui
}
int32_t
MFOffsetToInt32(const MFOffset& aOffset)
{
return int32_t(aOffset.value + (aOffset.fract / 65536.0f));
}
-media::TimeUnit
+TimeUnit
GetSampleDuration(IMFSample* aSample)
{
- NS_ENSURE_TRUE(aSample, media::TimeUnit::Invalid());
+ NS_ENSURE_TRUE(aSample, TimeUnit::Invalid());
int64_t duration = 0;
aSample->GetSampleDuration(&duration);
- return media::TimeUnit::FromMicroseconds(HNsToUsecs(duration));
+ return TimeUnit::FromMicroseconds(HNsToUsecs(duration));
}
-media::TimeUnit
+TimeUnit
GetSampleTime(IMFSample* aSample)
{
- NS_ENSURE_TRUE(aSample, media::TimeUnit::Invalid());
+ NS_ENSURE_TRUE(aSample, TimeUnit::Invalid());
LONGLONG timestampHns = 0;
HRESULT hr = aSample->GetSampleTime(&timestampHns);
- NS_ENSURE_TRUE(SUCCEEDED(hr), media::TimeUnit::Invalid());
- return media::TimeUnit::FromMicroseconds(HNsToUsecs(timestampHns));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), TimeUnit::Invalid());
+ return TimeUnit::FromMicroseconds(HNsToUsecs(timestampHns));
}
// Gets the sub-region of the video frame that should be displayed.
// See: http://msdn.microsoft.com/en-us/library/windows/desktop/bb530115(v=vs.85).aspx
HRESULT
GetPictureRegion(IMFMediaType* aMediaType, nsIntRect& aOutPictureRegion)
{
// Determine if "pan and scan" is enabled for this media. If it is, we
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -36,16 +36,17 @@
#include "nsWindowsHelpers.h"
#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
using mozilla::layers::Image;
using mozilla::layers::IMFYCbCrImage;
using mozilla::layers::LayerManager;
using mozilla::layers::LayersBackend;
+using mozilla::media::TimeUnit;
#if WINVER_MAXVER < 0x0A00
// Windows 10+ SDK has VP80 and VP90 defines
const GUID MFVideoFormat_VP80 =
{
0x30385056,
0x0000,
0x0010,
@@ -821,19 +822,19 @@ WMFVideoMFTManager::CreateBasicVideoFram
// V plane (Cr)
b.mPlanes[2].mData = data + y_size;
b.mPlanes[2].mStride = halfStride;
b.mPlanes[2].mHeight = halfHeight;
b.mPlanes[2].mWidth = halfWidth;
b.mPlanes[2].mOffset = 0;
b.mPlanes[2].mSkip = 0;
- media::TimeUnit pts = GetSampleTime(aSample);
+ TimeUnit pts = GetSampleTime(aSample);
NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
- media::TimeUnit duration = GetSampleDuration(aSample);
+ TimeUnit duration = GetSampleDuration(aSample);
NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
nsIntRect pictureRegion = mVideoInfo.ScaledImageRect(videoWidth, videoHeight);
LayersBackend backend = GetCompositorBackendType(mKnowsCompositor);
if (backend != LayersBackend::LAYERS_D3D11) {
RefPtr<VideoData> v =
VideoData::CreateAndCopyData(mVideoInfo,
mImageContainer,
@@ -892,19 +893,19 @@ WMFVideoMFTManager::CreateD3DVideoFrame(
mVideoInfo.ScaledImageRect(mImageSize.width, mImageSize.height);
RefPtr<Image> image;
hr = mDXVA2Manager->CopyToImage(aSample,
pictureRegion,
getter_AddRefs(image));
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
NS_ENSURE_TRUE(image, E_FAIL);
- media::TimeUnit pts = GetSampleTime(aSample);
+ TimeUnit pts = GetSampleTime(aSample);
NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
- media::TimeUnit duration = GetSampleDuration(aSample);
+ TimeUnit duration = GetSampleDuration(aSample);
NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
RefPtr<VideoData> v = VideoData::CreateFromImage(mVideoInfo.mDisplay,
aStreamOffset,
pts.ToMicroseconds(),
duration,
image.forget(),
false,
-1);
@@ -926,18 +927,18 @@ WMFVideoMFTManager::Output(int64_t aStre
int typeChangeCount = 0;
bool wasDraining = mDraining;
int64_t sampleCount = mSamplesCount;
if (wasDraining) {
mSamplesCount = 0;
mDraining = false;
}
- media::TimeUnit pts;
- media::TimeUnit duration;
+ TimeUnit pts;
+ TimeUnit duration;
// Loop until we decode a sample, or an unexpected error that we can't
// handle occurs.
while (true) {
hr = mDecoder->Output(&sample);
if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
return MF_E_TRANSFORM_NEED_MORE_INPUT;
}
@@ -988,24 +989,24 @@ WMFVideoMFTManager::Output(int64_t aStre
}
continue;
}
pts = GetSampleTime(sample);
duration = GetSampleDuration(sample);
if (!pts.IsValid() || !duration.IsValid()) {
return E_FAIL;
}
- if (wasDraining && sampleCount == 1 && pts == media::TimeUnit()) {
+ if (wasDraining && sampleCount == 1 && pts == TimeUnit::Zero()) {
// WMF is unable to calculate a duration if only a single sample
// was parsed. Additionally, the pts always comes out at 0 under those
// circumstances.
// Seeing that we've only fed the decoder a single frame, the pts
// and duration are known, it's of the last sample.
- pts = media::TimeUnit::FromMicroseconds(mLastTime);
- duration = media::TimeUnit::FromMicroseconds(mLastDuration);
+ pts = TimeUnit::FromMicroseconds(mLastTime);
+ duration = TimeUnit::FromMicroseconds(mLastDuration);
}
if (mSeekTargetThreshold.isSome()) {
if ((pts + duration) < mSeekTargetThreshold.ref()) {
LOG("Dropping video frame which pts is smaller than seek target.");
// It is necessary to clear the pointer to release the previous output
// buffer.
sample = nullptr;
continue;
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -34,16 +34,17 @@
#include <numeric>
#define WEBM_DEBUG(arg, ...) MOZ_LOG(gMediaDemuxerLog, mozilla::LogLevel::Debug, ("WebMDemuxer(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
extern mozilla::LazyLogModule gMediaDemuxerLog;
namespace mozilla {
using namespace gfx;
+using media::TimeUnit;
LazyLogModule gNesteggLog("Nestegg");
// How far ahead will we look when searching future keyframe. In microseconds.
// This value is based on what appears to be a reasonable value as most webm
// files encountered appear to have keyframes located < 4s.
#define MAX_LOOK_AHEAD 10000000
@@ -385,17 +386,17 @@ WebMDemuxer::ReadMetadata()
break;
case NESTEGG_VIDEO_STEREO_RIGHT_LEFT:
mInfo.mVideo.mStereoMode = StereoMode::RIGHT_LEFT;
break;
}
uint64_t duration = 0;
r = nestegg_duration(context, &duration);
if (!r) {
- mInfo.mVideo.mDuration = media::TimeUnit::FromNanoseconds(duration);
+ mInfo.mVideo.mDuration = TimeUnit::FromNanoseconds(duration);
}
mInfo.mVideo.mCrypto = GetTrackCrypto(TrackInfo::kVideoTrack, track);
if (mInfo.mVideo.mCrypto.mValid) {
mCrypto.AddInitData(NS_LITERAL_STRING("webm"),
mInfo.mVideo.mCrypto.mKeyId);
}
} else if (type == NESTEGG_TRACK_AUDIO && !mHasAudio) {
nestegg_audio_params params;
@@ -408,18 +409,17 @@ WebMDemuxer::ReadMetadata()
mHasAudio = true;
mAudioCodec = nestegg_track_codec_id(context, track);
if (mAudioCodec == NESTEGG_CODEC_VORBIS) {
mInfo.mAudio.mMimeType = "audio/vorbis";
} else if (mAudioCodec == NESTEGG_CODEC_OPUS) {
mInfo.mAudio.mMimeType = "audio/opus";
OpusDataDecoder::AppendCodecDelay(
mInfo.mAudio.mCodecSpecificConfig,
- media::TimeUnit::FromNanoseconds(params.codec_delay)
- .ToMicroseconds());
+ TimeUnit::FromNanoseconds(params.codec_delay).ToMicroseconds());
}
mSeekPreroll = params.seek_preroll;
mInfo.mAudio.mRate = params.rate;
mInfo.mAudio.mChannels = params.channels;
unsigned int nheaders = 0;
r = nestegg_track_codec_data_count(context, track, &nheaders);
if (r == -1) {
@@ -452,17 +452,17 @@ WebMDemuxer::ReadMetadata()
}
else {
mInfo.mAudio.mCodecSpecificConfig->AppendElements(headers[0],
headerLens[0]);
}
uint64_t duration = 0;
r = nestegg_duration(context, &duration);
if (!r) {
- mInfo.mAudio.mDuration = media::TimeUnit::FromNanoseconds(duration);
+ mInfo.mAudio.mDuration = TimeUnit::FromNanoseconds(duration);
}
mInfo.mAudio.mCrypto = GetTrackCrypto(TrackInfo::kAudioTrack, track);
if (mInfo.mAudio.mCrypto.mValid) {
mCrypto.AddInitData(NS_LITERAL_STRING("webm"),
mInfo.mAudio.mCrypto.mKeyId);
}
}
}
@@ -717,32 +717,32 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
}
} else {
sample = new MediaRawData(data, length);
if (length && !sample->Data()) {
// OOM.
return NS_ERROR_OUT_OF_MEMORY;
}
}
- sample->mTimecode = media::TimeUnit::FromMicroseconds(tstamp);
- sample->mTime = media::TimeUnit::FromMicroseconds(tstamp);
- sample->mDuration = media::TimeUnit::FromMicroseconds(next_tstamp - tstamp);
+ sample->mTimecode = TimeUnit::FromMicroseconds(tstamp);
+ sample->mTime = TimeUnit::FromMicroseconds(tstamp);
+ sample->mDuration = TimeUnit::FromMicroseconds(next_tstamp - tstamp);
sample->mOffset = holder->Offset();
sample->mKeyframe = isKeyframe;
if (discardPadding && i == count - 1) {
CheckedInt64 discardFrames;
if (discardPadding < 0) {
// This is an invalid value as discard padding should never be negative.
// Set to maximum value so that the decoder will reject it as it's
// greater than the number of frames available.
discardFrames = INT32_MAX;
WEBM_DEBUG("Invalid negative discard padding");
} else {
discardFrames = TimeUnitToFrames(
- media::TimeUnit::FromNanoseconds(discardPadding), mInfo.mAudio.mRate);
+ TimeUnit::FromNanoseconds(discardPadding), mInfo.mAudio.mRate);
}
if (discardFrames.isValid()) {
sample->mDiscardPadding = discardFrames.value();
}
}
if (packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_UNENCRYPTED
|| packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_ENCRYPTED
@@ -921,47 +921,47 @@ WebMDemuxer::PushAudioPacket(NesteggPack
void
WebMDemuxer::PushVideoPacket(NesteggPacketHolder* aItem)
{
mVideoPackets.PushFront(aItem);
}
nsresult
WebMDemuxer::SeekInternal(TrackInfo::TrackType aType,
- const media::TimeUnit& aTarget)
+ const TimeUnit& aTarget)
{
EnsureUpToDateIndex();
uint32_t trackToSeek = mHasVideo ? mVideoTrack : mAudioTrack;
uint64_t target = aTarget.ToNanoseconds();
if (NS_FAILED(Reset(aType))) {
return NS_ERROR_FAILURE;
}
if (mSeekPreroll) {
uint64_t startTime = 0;
if (!mBufferedState->GetStartTime(&startTime)) {
startTime = 0;
}
WEBM_DEBUG("Seek Target: %f",
- media::TimeUnit::FromNanoseconds(target).ToSeconds());
+ TimeUnit::FromNanoseconds(target).ToSeconds());
if (target < mSeekPreroll || target - mSeekPreroll < startTime) {
target = startTime;
} else {
target -= mSeekPreroll;
}
WEBM_DEBUG("SeekPreroll: %f StartTime: %f Adjusted Target: %f",
- media::TimeUnit::FromNanoseconds(mSeekPreroll).ToSeconds(),
- media::TimeUnit::FromNanoseconds(startTime).ToSeconds(),
- media::TimeUnit::FromNanoseconds(target).ToSeconds());
+ TimeUnit::FromNanoseconds(mSeekPreroll).ToSeconds(),
+ TimeUnit::FromNanoseconds(startTime).ToSeconds(),
+ TimeUnit::FromNanoseconds(target).ToSeconds());
}
int r = nestegg_track_seek(Context(aType), trackToSeek, target);
if (r == -1) {
WEBM_DEBUG("track_seek for track %u to %f failed, r=%d", trackToSeek,
- media::TimeUnit::FromNanoseconds(target).ToSeconds(), r);
+ TimeUnit::FromNanoseconds(target).ToSeconds(), r);
// Try seeking directly based on cluster information in memory.
int64_t offset = 0;
bool rv = mBufferedState->GetOffsetForTime(target, &offset);
if (!rv) {
WEBM_DEBUG("mBufferedState->GetOffsetForTime failed too");
return NS_ERROR_FAILURE;
}
@@ -998,36 +998,36 @@ WebMDemuxer::GetBuffered()
}
uint64_t duration = 0;
uint64_t startOffset = 0;
if (!nestegg_duration(Context(TrackInfo::kVideoTrack), &duration)) {
if(mBufferedState->GetStartTime(&startOffset)) {
duration += startOffset;
}
WEBM_DEBUG("Duration: %f StartTime: %f",
- media::TimeUnit::FromNanoseconds(duration).ToSeconds(),
- media::TimeUnit::FromNanoseconds(startOffset).ToSeconds());
+ TimeUnit::FromNanoseconds(duration).ToSeconds(),
+ TimeUnit::FromNanoseconds(startOffset).ToSeconds());
}
for (uint32_t index = 0; index < ranges.Length(); index++) {
uint64_t start, end;
bool rv = mBufferedState->CalculateBufferedForRange(ranges[index].mStart,
ranges[index].mEnd,
&start, &end);
if (rv) {
NS_ASSERTION(startOffset <= start,
"startOffset negative or larger than start time");
if (duration && end > duration) {
WEBM_DEBUG("limit range to duration, end: %f duration: %f",
- media::TimeUnit::FromNanoseconds(end).ToSeconds(),
- media::TimeUnit::FromNanoseconds(duration).ToSeconds());
+ TimeUnit::FromNanoseconds(end).ToSeconds(),
+ TimeUnit::FromNanoseconds(duration).ToSeconds());
end = duration;
}
- media::TimeUnit startTime = media::TimeUnit::FromNanoseconds(start);
- media::TimeUnit endTime = media::TimeUnit::FromNanoseconds(end);
+ auto startTime = TimeUnit::FromNanoseconds(start);
+ auto endTime = TimeUnit::FromNanoseconds(end);
WEBM_DEBUG("add range %f-%f", startTime.ToSeconds(), endTime.ToSeconds());
buffered += media::TimeInterval(startTime, endTime);
}
}
return buffered;
}
bool WebMDemuxer::GetOffsetForTime(uint64_t aTime, int64_t* aOffset)
@@ -1056,29 +1056,29 @@ WebMTrackDemuxer::~WebMTrackDemuxer()
UniquePtr<TrackInfo>
WebMTrackDemuxer::GetInfo() const
{
return mInfo->Clone();
}
RefPtr<WebMTrackDemuxer::SeekPromise>
-WebMTrackDemuxer::Seek(const media::TimeUnit& aTime)
+WebMTrackDemuxer::Seek(const TimeUnit& aTime)
{
// Seeks to aTime. Upon success, SeekPromise will be resolved with the
// actual time seeked to. Typically the random access point time
- media::TimeUnit seekTime = aTime;
+ auto seekTime = aTime;
mSamples.Reset();
mParent->SeekInternal(mType, aTime);
nsresult rv = mParent->GetNextPacket(mType, &mSamples);
if (NS_FAILED(rv)) {
if (rv == NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
// Ignore the error for now, the next GetSample will be rejected with EOS.
- return SeekPromise::CreateAndResolve(media::TimeUnit(), __func__);
+ return SeekPromise::CreateAndResolve(TimeUnit::Zero(), __func__);
}
return SeekPromise::CreateAndReject(rv, __func__);
}
mNeedKeyframe = true;
// Check what time we actually seeked to.
if (mSamples.GetSize() > 0) {
const RefPtr<MediaRawData>& sample = mSamples.First();
@@ -1135,17 +1135,17 @@ WebMTrackDemuxer::GetSamples(int32_t aNu
void
WebMTrackDemuxer::SetNextKeyFrameTime()
{
if (mType != TrackInfo::kVideoTrack || mParent->IsMediaSource()) {
return;
}
- auto frameTime = media::TimeUnit::Invalid();
+ auto frameTime = TimeUnit::Invalid();
mNextKeyframeTime.reset();
MediaRawDataQueue skipSamplesQueue;
bool foundKeyframe = false;
while (!foundKeyframe && mSamples.GetSize()) {
RefPtr<MediaRawData> sample = mSamples.PopFront();
if (sample->mKeyframe) {
@@ -1221,31 +1221,29 @@ WebMTrackDemuxer::UpdateSamples(nsTArray
}
if (mNextKeyframeTime.isNothing()
|| aSamples.LastElement()->mTime >= mNextKeyframeTime.value()) {
SetNextKeyFrameTime();
}
}
nsresult
-WebMTrackDemuxer::GetNextRandomAccessPoint(media::TimeUnit* aTime)
+WebMTrackDemuxer::GetNextRandomAccessPoint(TimeUnit* aTime)
{
if (mNextKeyframeTime.isNothing()) {
// There's no next key frame.
- *aTime =
- media::TimeUnit::FromMicroseconds(std::numeric_limits<int64_t>::max());
+ *aTime = TimeUnit::FromInfinity();
} else {
*aTime = mNextKeyframeTime.ref();
}
return NS_OK;
}
RefPtr<WebMTrackDemuxer::SkipAccessPointPromise>
-WebMTrackDemuxer::SkipToNextRandomAccessPoint(
- const media::TimeUnit& aTimeThreshold)
+WebMTrackDemuxer::SkipToNextRandomAccessPoint(const TimeUnit& aTimeThreshold)
{
uint32_t parsed = 0;
bool found = false;
RefPtr<MediaRawData> sample;
nsresult rv = NS_OK;
WEBM_DEBUG("TimeThreshold: %f", aTimeThreshold.ToSeconds());
while (!found && NS_SUCCEEDED((rv = NextSample(sample)))) {
@@ -1277,17 +1275,17 @@ WebMTrackDemuxer::GetBuffered()
void
WebMTrackDemuxer::BreakCycles()
{
mParent = nullptr;
}
int64_t
-WebMTrackDemuxer::GetEvictionOffset(const media::TimeUnit& aTime)
+WebMTrackDemuxer::GetEvictionOffset(const TimeUnit& aTime)
{
int64_t offset;
if (!mParent->GetOffsetForTime(aTime.ToNanoseconds(), &offset)) {
return 0;
}
return offset;
}