--- a/dom/media/ADTSDemuxer.cpp
+++ b/dom/media/ADTSDemuxer.cpp
@@ -748,22 +748,22 @@ ADTSTrackDemuxer::GetNextFrame(const adt
if (read != length) {
ADTSLOG("GetNext() Exit read=%u frame->Size()=%" PRIuSIZE, read, frame->Size());
return nullptr;
}
UpdateState(aFrame);
frame->mTime = Duration(mFrameIndex - 1).ToMicroseconds();
- frame->mDuration = Duration(1).ToMicroseconds();
+ frame->mDuration = Duration(1);
frame->mTimecode = frame->mTime;
frame->mKeyframe = true;
MOZ_ASSERT(frame->mTime >= 0);
- MOZ_ASSERT(frame->mDuration > 0);
+ MOZ_ASSERT(frame->mDuration.IsPositive());
ADTSLOGV("GetNext() End mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64
" mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64
" mSamplesPerFrame=%d mSamplesPerSecond=%d mChannels=%d",
mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen,
mSamplesPerFrame, mSamplesPerSecond, mChannels);
return frame.forget();
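
Duration(1) above converts a frame count to media time; the change keeps the result as a TimeUnit instead of flattening it to microseconds. A standalone sketch of that frames-to-time conversion, under the assumption (not copied from the demuxer) that it divides samples per frame by the sample rate:

#include <cassert>
#include <cstdint>

// Assumed shape of ADTSTrackDemuxer::Duration(aFrameCount); the real
// method is not shown in this patch.
static int64_t FrameDurationUs(int64_t aFrames, uint32_t aSamplesPerFrame,
                               uint32_t aSamplesPerSecond) {
  return aFrames * aSamplesPerFrame * int64_t(1000000) / aSamplesPerSecond;
}

int main() {
  // One AAC frame of 1024 samples at 48 kHz lasts ~21.3 ms.
  assert(FrameDurationUs(1, 1024, 48000) == 21333);
  return 0;
}
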
--- a/dom/media/MP3Demuxer.cpp
+++ b/dom/media/MP3Demuxer.cpp
@@ -600,22 +600,22 @@ MP3TrackDemuxer::GetNextFrame(const Medi
if (read != aRange.Length()) {
MP3LOG("GetNext() Exit read=%u frame->Size()=%" PRIuSIZE, read, frame->Size());
return nullptr;
}
UpdateState(aRange);
frame->mTime = Duration(mFrameIndex - 1).ToMicroseconds();
- frame->mDuration = Duration(1).ToMicroseconds();
+ frame->mDuration = Duration(1);
frame->mTimecode = frame->mTime;
frame->mKeyframe = true;
MOZ_ASSERT(frame->mTime >= 0);
- MOZ_ASSERT(frame->mDuration > 0);
+ MOZ_ASSERT(frame->mDuration.IsPositive());
if (mNumParsedFrames == 1) {
// First frame parsed, let's read VBR info if available.
ByteReader reader(frame->Data(), frame->Size());
mParser.ParseVBRHeader(&reader);
mFirstFrameOffset = frame->mOffset;
}
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -17,16 +17,17 @@
#include <stdint.h>
namespace mozilla {
using namespace mozilla::gfx;
using layers::ImageContainer;
using layers::PlanarYCbCrImage;
using layers::PlanarYCbCrData;
+using media::TimeUnit;
const char* AudioData::sTypeName = "audio";
const char* VideoData::sTypeName = "video";
void
AudioData::EnsureAudioBuffer()
{
if (mAudioBuffer)
@@ -168,17 +169,17 @@ VideoData::VideoData(int64_t aOffset,
int64_t aTimecode,
IntSize aDisplay,
layers::ImageContainer::FrameID aFrameID)
: MediaData(VIDEO_DATA, aOffset, aTime, aDuration, 1)
, mDisplay(aDisplay)
, mFrameID(aFrameID)
, mSentToCompositor(false)
{
- NS_ASSERTION(mDuration >= 0, "Frame must have non-negative duration.");
+ MOZ_ASSERT(!mDuration.IsNegative(), "Frame must have non-negative duration.");
mKeyframe = aKeyframe;
mTimecode = aTimecode;
}
VideoData::~VideoData()
{
}
@@ -221,29 +222,29 @@ VideoData::SizeOfIncludingThis(MallocSiz
return size;
}
void
VideoData::UpdateDuration(int64_t aDuration)
{
MOZ_ASSERT(aDuration >= 0);
- mDuration = aDuration;
+ mDuration = TimeUnit::FromMicroseconds(aDuration);
}
void
VideoData::UpdateTimestamp(int64_t aTimestamp)
{
MOZ_ASSERT(aTimestamp >= 0);
int64_t updatedDuration = GetEndTime() - aTimestamp;
MOZ_ASSERT(updatedDuration >= 0);
mTime = aTimestamp;
- mDuration = updatedDuration;
+ mDuration = TimeUnit::FromMicroseconds(updatedDuration);
}
/* static */
bool VideoData::SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
const VideoInfo& aInfo,
const YCbCrBuffer &aBuffer,
const IntRect& aPicture,
bool aCopyData)
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -14,16 +14,17 @@
#include "nsIMemoryReporter.h"
#include "SharedBuffer.h"
#include "mozilla/RefPtr.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/UniquePtrExtensions.h"
#include "nsTArray.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/PodOperations.h"
+#include "TimeUnits.h"
namespace mozilla {
namespace layers {
class Image;
class ImageContainer;
} // namespace layers
@@ -290,17 +291,17 @@ public:
int64_t aOffset,
int64_t aTimestamp,
int64_t aDuration,
uint32_t aFrames)
: mType(aType)
, mOffset(aOffset)
, mTime(aTimestamp)
, mTimecode(aTimestamp)
- , mDuration(aDuration)
+ , mDuration(media::TimeUnit::FromMicroseconds(aDuration))
, mFrames(aFrames)
, mKeyframe(false)
{
}
// Type of contained data.
const Type mType;
@@ -310,24 +311,24 @@ public:
// Start time of sample, in microseconds.
int64_t mTime;
// Codec specific internal time code. For Ogg based codecs this is the
// granulepos.
int64_t mTimecode;
// Duration of sample, in microseconds.
- int64_t mDuration;
+ media::TimeUnit mDuration;
// Amount of frames for contained data.
const uint32_t mFrames;
bool mKeyframe;
- int64_t GetEndTime() const { return mTime + mDuration; }
+ int64_t GetEndTime() const { return mTime + mDuration.ToMicroseconds(); }
bool AdjustForStartTime(int64_t aStartTime)
{
mTime = mTime - aStartTime;
return mTime >= 0;
}
template <typename ReturnType>
@@ -345,17 +346,16 @@ public:
}
protected:
MediaData(Type aType, uint32_t aFrames)
: mType(aType)
, mOffset(0)
, mTime(0)
, mTimecode(0)
- , mDuration(0)
, mFrames(aFrames)
, mKeyframe(false)
{
}
virtual ~MediaData() { }
};
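
With mDuration now a media::TimeUnit while mTime stays in raw microseconds, GetEndTime() bridges the two representations, and a default-constructed TimeUnit is zero, which is why the protected constructor can drop its explicit mDuration(0) initializer. A minimal sketch of both points, using a simplified stand-in rather than the real TimeUnit (which wraps a CheckedInt64):

#include <cassert>
#include <cstdint>

// Simplified stand-in for media::TimeUnit; default-constructs to zero.
struct TimeUnitSketch {
  int64_t mUs = 0;
  static TimeUnitSketch FromMicroseconds(int64_t aUs) {
    TimeUnitSketch t;
    t.mUs = aUs;
    return t;
  }
  int64_t ToMicroseconds() const { return mUs; }
};

struct MediaDataSketch {
  int64_t mTime = 0;         // still raw microseconds
  TimeUnitSketch mDuration;  // now a strong type, zero by default
  // Mirrors MediaData::GetEndTime() from the hunk above.
  int64_t GetEndTime() const { return mTime + mDuration.ToMicroseconds(); }
};

int main() {
  MediaDataSketch sample;
  assert(sample.GetEndTime() == 0);  // no explicit mDuration(0) needed
  sample.mTime = 1000000;
  sample.mDuration = TimeUnitSketch::FromMicroseconds(33333);
  assert(sample.GetEndTime() == 1033333);
  return 0;
}
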
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -1719,17 +1719,18 @@ MediaFormatReader::OnAudioDemuxCompleted
void
MediaFormatReader::NotifyNewOutput(
TrackType aTrack, const MediaDataDecoder::DecodedData& aResults)
{
MOZ_ASSERT(OnTaskQueue());
auto& decoder = GetDecoderData(aTrack);
for (auto& sample : aResults) {
LOGV("Received new %s sample time:%" PRId64 " duration:%" PRId64,
- TrackTypeToStr(aTrack), sample->mTime, sample->mDuration);
+ TrackTypeToStr(aTrack), sample->mTime,
+ sample->mDuration.ToMicroseconds());
decoder.mOutput.AppendElement(sample);
decoder.mNumSamplesOutput++;
decoder.mNumOfConsecutiveError = 0;
}
LOG("Done processing new %s samples", TrackTypeToStr(aTrack));
ScheduleUpdate(aTrack);
}
--- a/dom/media/TimeUnits.h
+++ b/dom/media/TimeUnits.h
@@ -160,16 +160,20 @@ public:
bool IsInfinite() const {
return mValue.value() == INT64_MAX;
}
bool IsPositive() const {
return mValue.value() > 0;
}
+ bool IsNegative() const {
+ return mValue.value() < 0;
+ }
+
bool operator == (const TimeUnit& aOther) const {
MOZ_ASSERT(IsValid() && aOther.IsValid());
return mValue.value() == aOther.mValue.value();
}
bool operator != (const TimeUnit& aOther) const {
MOZ_ASSERT(IsValid() && aOther.IsValid());
return mValue.value() != aOther.mValue.value();
}
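
IsNegative() completes the sign predicates so callers can assert on a duration without flattening it to microseconds first. A minimal sketch of the migrated assertion idioms, assuming only the predicate semantics this hunk shows:

#include <cassert>
#include <cstdint>

struct TimeUnitSketch {
  int64_t mUs = 0;
  bool IsPositive() const { return mUs > 0; }
  bool IsNegative() const { return mUs < 0; }
};

int main() {
  TimeUnitSketch duration{23220};  // hypothetical AAC frame duration in us
  // Replaces MOZ_ASSERT(duration > 0) on a raw microsecond count:
  assert(duration.IsPositive());
  // Replaces MOZ_ASSERT(duration >= 0):
  assert(!duration.IsNegative());
  return 0;
}
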
--- a/dom/media/flac/FlacDemuxer.cpp
+++ b/dom/media/flac/FlacDemuxer.cpp
@@ -976,23 +976,23 @@ FlacTrackDemuxer::GetNextFrame(const fla
const uint32_t read = Read(frameWriter->Data(), offset, size);
if (read != size) {
LOG("GetNextFrame() Exit read=%u frame->Size=%" PRIuSIZE, read, frame->Size());
return nullptr;
}
frame->mTime = aFrame.Time().ToMicroseconds();
- frame->mDuration = aFrame.Duration().ToMicroseconds();
+ frame->mDuration = aFrame.Duration();
frame->mTimecode = frame->mTime;
frame->mOffset = aFrame.Offset();
frame->mKeyframe = true;
MOZ_ASSERT(frame->mTime >= 0);
- MOZ_ASSERT(frame->mDuration >= 0);
+ MOZ_ASSERT(!frame->mDuration.IsNegative());
return frame.forget();
}
int32_t
FlacTrackDemuxer::Read(uint8_t* aBuffer, int64_t aOffset, int32_t aSize)
{
uint32_t read = 0;
--- a/dom/media/fmp4/MP4Demuxer.cpp
+++ b/dom/media/fmp4/MP4Demuxer.cpp
@@ -457,27 +457,29 @@ MP4TrackDemuxer::GetNextSample()
case mp4_demuxer::H264::FrameType::OTHER:
{
bool keyframe = type == mp4_demuxer::H264::FrameType::I_FRAME;
if (sample->mKeyframe != keyframe) {
NS_WARNING(nsPrintfCString("Frame incorrectly marked as %skeyframe "
"@ pts:%" PRId64 " dur:%" PRId64
" dts:%" PRId64,
keyframe ? "" : "non-", sample->mTime,
- sample->mDuration, sample->mTimecode)
+ sample->mDuration.ToMicroseconds(),
+ sample->mTimecode)
.get());
sample->mKeyframe = keyframe;
}
break;
}
case mp4_demuxer::H264::FrameType::INVALID:
NS_WARNING(
nsPrintfCString("Invalid H264 frame @ pts:%" PRId64 " dur:%" PRId64
" dts:%" PRId64,
- sample->mTime, sample->mDuration, sample->mTimecode)
+ sample->mTime, sample->mDuration.ToMicroseconds(),
+ sample->mTimecode)
.get());
// We could reject the sample now, but demuxer errors are fatal,
// so we keep the invalid frame and rely on the H264 decoder to
// handle the error later.
// TODO: make demuxer errors non-fatal.
break;
}
}
--- a/dom/media/gmp/ChromiumCDMParent.cpp
+++ b/dom/media/gmp/ChromiumCDMParent.cpp
@@ -186,17 +186,17 @@ ChromiumCDMParent::InitCDMInputBuffer(gm
return false;
}
memcpy(shmem.get<uint8_t>(), aSample->Data(), aSample->Size());
aBuffer = gmp::CDMInputBuffer(shmem,
crypto.mKeyId,
crypto.mIV,
aSample->mTime,
- aSample->mDuration,
+ aSample->mDuration.ToMicroseconds(),
crypto.mPlainSizes,
crypto.mEncryptedSizes,
crypto.mValid);
return true;
}
bool
ChromiumCDMParent::SendBufferToCDM(uint32_t aSizeInBytes)
--- a/dom/media/ipc/VideoDecoderChild.cpp
+++ b/dom/media/ipc/VideoDecoderChild.cpp
@@ -225,17 +225,17 @@ VideoDecoderChild::Decode(MediaRawData*
NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__);
}
memcpy(buffer.get<uint8_t>(), aSample->Data(), aSample->Size());
MediaRawDataIPDL sample(MediaDataIPDL(aSample->mOffset,
aSample->mTime,
aSample->mTimecode,
- aSample->mDuration,
+ aSample->mDuration.ToMicroseconds(),
aSample->mFrames,
aSample->mKeyframe),
buffer);
SendInput(sample);
return mDecodePromise.Ensure(__func__);
}
RefPtr<MediaDataDecoder::FlushPromise>
--- a/dom/media/ipc/VideoDecoderParent.cpp
+++ b/dom/media/ipc/VideoDecoderParent.cpp
@@ -134,17 +134,17 @@ VideoDecoderParent::RecvInput(const Medi
if (aData.buffer().Size<uint8_t>() && !data->Data()) {
// OOM
Error(NS_ERROR_OUT_OF_MEMORY);
return IPC_OK();
}
data->mOffset = aData.base().offset();
data->mTime = aData.base().time();
data->mTimecode = aData.base().timecode();
- data->mDuration = aData.base().duration();
+ data->mDuration = media::TimeUnit::FromMicroseconds(aData.base().duration());
data->mKeyframe = aData.base().keyframe();
DeallocShmem(aData.buffer());
RefPtr<VideoDecoderParent> self = this;
mDecoder->Decode(data)->Then(
mManagerTaskQueue, __func__,
[self, this](const MediaDataDecoder::DecodedData& aResults) {
@@ -182,17 +182,18 @@ VideoDecoderParent::ProcessDecodedData(
if (texture && !texture->IsAddedToCompositableClient()) {
texture->InitIPDLActor(mKnowsCompositor);
texture->SetAddedToCompositableClient();
}
VideoDataIPDL output(
MediaDataIPDL(data->mOffset, data->mTime, data->mTimecode,
- data->mDuration, data->mFrames, data->mKeyframe),
+ data->mDuration.ToMicroseconds(),
+ data->mFrames, data->mKeyframe),
video->mDisplay,
texture ? texture->GetSize() : IntSize(),
texture ? mParent->StoreImage(video->mImage, texture)
: SurfaceDescriptorGPUVideo(0),
video->mFrameID);
Unused << SendOutput(output);
}
}
--- a/dom/media/mediasource/TrackBuffersManager.cpp
+++ b/dom/media/mediasource/TrackBuffersManager.cpp
@@ -1460,17 +1460,17 @@ TrackBuffersManager::ProcessFrames(Track
// We only apply the leeway with the default append window start of 0
// otherwise do as per spec.
TimeInterval targetWindow =
mAppendWindow.mStart != TimeUnit::FromSeconds(0)
? mAppendWindow
: TimeInterval(mAppendWindow.mStart, mAppendWindow.mEnd,
trackBuffer.mLastFrameDuration.isSome()
? trackBuffer.mLongestFrameDuration
- : TimeUnit::FromMicroseconds(aSamples[0]->mDuration));
+ : aSamples[0]->mDuration);
TimeIntervals samplesRange;
uint32_t sizeNewSamples = 0;
TrackBuffer samples; // array that will contain the frames to be added
// to our track buffer.
// We assume that no frames are contiguous within a media segment and as such
// don't need to check for discontinuity except for the first frame and should
@@ -1486,17 +1486,17 @@ TrackBuffersManager::ProcessFrames(Track
for (auto& sample : aSamples) {
SAMPLE_DEBUG("Processing %s frame(pts:%" PRId64 " end:%" PRId64 ", dts:%" PRId64 ", duration:%" PRId64 ", "
"kf:%d)",
aTrackData.mInfo->mMimeType.get(),
sample->mTime,
sample->GetEndTime(),
sample->mTimecode,
- sample->mDuration,
+ sample->mDuration.ToMicroseconds(),
sample->mKeyframe);
const TimeUnit sampleEndTime =
TimeUnit::FromMicroseconds(sample->GetEndTime());
if (sampleEndTime > aTrackData.mLastParsedEndTime) {
aTrackData.mLastParsedEndTime = sampleEndTime;
}
@@ -1522,17 +1522,17 @@ TrackBuffersManager::ProcessFrames(Track
// Let decode timestamp be a double precision floating point representation of the coded frame's decode timestamp in seconds.
// 2. Let frame duration be a double precision floating point representation of the coded frame's duration in seconds.
// Step 3 is performed earlier or when a discontinuity has been detected.
// 4. If timestampOffset is not 0, then run the following steps:
TimeUnit sampleTime = TimeUnit::FromMicroseconds(sample->mTime);
TimeUnit sampleTimecode = TimeUnit::FromMicroseconds(sample->mTimecode);
- TimeUnit sampleDuration = TimeUnit::FromMicroseconds(sample->mDuration);
+ TimeUnit sampleDuration = sample->mDuration;
TimeUnit timestampOffset = mSourceBufferAttributes->GetTimestampOffset();
TimeInterval sampleInterval =
mSourceBufferAttributes->mGenerateTimestamps
? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
: TimeInterval(timestampOffset + sampleTime,
timestampOffset + sampleTime + sampleDuration);
TimeUnit decodeTimestamp = mSourceBufferAttributes->mGenerateTimestamps
@@ -1865,17 +1865,17 @@ TrackBuffersManager::RemoveFrames(const
for (uint32_t i = lastRemovedIndex + 1; i < data.Length(); i++) {
const RefPtr<MediaRawData>& sample = data[i];
if (sample->mKeyframe) {
break;
}
lastRemovedIndex = i;
}
- int64_t maxSampleDuration = 0;
+ TimeUnit maxSampleDuration;
uint32_t sizeRemoved = 0;
TimeIntervals removedIntervals;
for (uint32_t i = firstRemovedIndex.ref(); i <= lastRemovedIndex; i++) {
const RefPtr<MediaRawData> sample = data[i];
TimeInterval sampleInterval =
TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
TimeUnit::FromMicroseconds(sample->GetEndTime()));
removedIntervals += sampleInterval;
@@ -1926,17 +1926,17 @@ TrackBuffersManager::RemoveFrames(const
}
}
// Update our buffered range to exclude the range just removed.
aTrackData.mBufferedRanges -= removedIntervals;
// Recalculate sanitized buffered ranges.
aTrackData.mSanitizedBufferedRanges = aTrackData.mBufferedRanges;
- aTrackData.mSanitizedBufferedRanges.SetFuzz(TimeUnit::FromMicroseconds(maxSampleDuration/2));
+ aTrackData.mSanitizedBufferedRanges.SetFuzz(maxSampleDuration / 2);
data.RemoveElementsAt(firstRemovedIndex.ref(),
lastRemovedIndex - firstRemovedIndex.ref() + 1);
if (aIntervals.GetEnd() >= aTrackData.mHighestStartTimestamp) {
// The sample with the highest presentation time got removed.
// Rescan the trackbuffer to determine the new one.
int64_t highestStartTime = 0;
@@ -2239,17 +2239,17 @@ TrackBuffersManager::SkipToNextRandomAcc
break;
}
if (sample->mKeyframe &&
sample->mTime >= aTimeThreadshold.ToMicroseconds()) {
aFound = true;
break;
}
nextSampleTimecode =
- TimeUnit::FromMicroseconds(sample->mTimecode + sample->mDuration);
+ TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration;
nextSampleTime = TimeUnit::FromMicroseconds(sample->GetEndTime());
parsed++;
}
// Adjust the next demux time and index so that the next call to
// SkipToNextRandomAccessPoint will not count the parsed sample as
// skipped again.
if (aFound) {
@@ -2355,17 +2355,17 @@ TrackBuffersManager::GetSample(TrackInfo
return nullptr;
}
if (p->mKeyframe) {
UpdateEvictionIndex(trackData, trackData.mNextGetSampleIndex.ref());
}
trackData.mNextGetSampleIndex.ref()++;
// Estimate decode timestamp and timestamp of the next sample.
TimeUnit nextSampleTimecode =
- TimeUnit::FromMicroseconds(sample->mTimecode + sample->mDuration);
+ TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration;
TimeUnit nextSampleTime =
TimeUnit::FromMicroseconds(sample->GetEndTime());
const MediaRawData* nextSample =
GetSample(aTrack,
trackData.mNextGetSampleIndex.ref(),
nextSampleTimecode,
nextSampleTime,
aFuzz);
@@ -2379,18 +2379,19 @@ TrackBuffersManager::GetSample(TrackInfo
// Next sample isn't available yet. Use estimates.
trackData.mNextSampleTimecode = nextSampleTimecode;
trackData.mNextSampleTime = nextSampleTime;
}
aResult = NS_OK;
return p.forget();
}
- if (trackData.mNextSampleTimecode.ToMicroseconds() >
- track.LastElement()->mTimecode + track.LastElement()->mDuration) {
+ if (trackData.mNextSampleTimecode >
+ TimeUnit::FromMicroseconds(track.LastElement()->mTimecode)
+ + track.LastElement()->mDuration) {
// The next element is past our last sample. We're done.
trackData.mNextGetSampleIndex = Some(uint32_t(track.Length()));
aResult = NS_ERROR_DOM_MEDIA_END_OF_STREAM;
return nullptr;
}
// Our previous index has been overwritten; attempt to find the new one.
int32_t pos = FindCurrentPosition(aTrack, aFuzz);
@@ -2412,17 +2413,17 @@ TrackBuffersManager::GetSample(TrackInfo
// Find the previous keyframe to calculate the evictable amount.
int32_t i = pos;
for (; !track[i]->mKeyframe; i--) {
}
UpdateEvictionIndex(trackData, i);
trackData.mNextGetSampleIndex = Some(uint32_t(pos)+1);
trackData.mNextSampleTimecode =
- TimeUnit::FromMicroseconds(sample->mTimecode + sample->mDuration);
+ TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration;
trackData.mNextSampleTime =
TimeUnit::FromMicroseconds(sample->GetEndTime());
aResult = NS_OK;
return p.forget();
}
int32_t
TrackBuffersManager::FindCurrentPosition(TrackInfo::TrackType aTrack,
@@ -2432,33 +2433,33 @@ TrackBuffersManager::FindCurrentPosition
auto& trackData = GetTracksData(aTrack);
const TrackBuffer& track = GetTrackBuffer(aTrack);
// Perform an exact search first.
for (uint32_t i = 0; i < track.Length(); i++) {
const RefPtr<MediaRawData>& sample = track[i];
TimeInterval sampleInterval{
TimeUnit::FromMicroseconds(sample->mTimecode),
- TimeUnit::FromMicroseconds(sample->mTimecode + sample->mDuration)};
+ TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration};
if (sampleInterval.ContainsStrict(trackData.mNextSampleTimecode)) {
return i;
}
if (sampleInterval.mStart > trackData.mNextSampleTimecode) {
// Samples are ordered by timecode. There's no need to search
// any further.
break;
}
}
for (uint32_t i = 0; i < track.Length(); i++) {
const RefPtr<MediaRawData>& sample = track[i];
TimeInterval sampleInterval{
TimeUnit::FromMicroseconds(sample->mTimecode),
- TimeUnit::FromMicroseconds(sample->mTimecode + sample->mDuration),
+ TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration,
aFuzz};
if (sampleInterval.ContainsWithStrictEnd(trackData.mNextSampleTimecode)) {
return i;
}
if (sampleInterval.mStart - aFuzz > trackData.mNextSampleTimecode) {
// Samples are ordered by timecode. There's no need to search
// any further.
@@ -2509,17 +2510,17 @@ TrackBuffersManager::GetNextRandomAccess
GetSample(aTrack, i, nextSampleTimecode, nextSampleTime, aFuzz);
if (!sample) {
break;
}
if (sample->mKeyframe) {
return TimeUnit::FromMicroseconds(sample->mTime);
}
nextSampleTimecode =
- TimeUnit::FromMicroseconds(sample->mTimecode + sample->mDuration);
+ TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration;
nextSampleTime = TimeUnit::FromMicroseconds(sample->GetEndTime());
}
return TimeUnit::FromInfinity();
}
void
TrackBuffersManager::TrackData::AddSizeOfResources(MediaSourceDecoder::ResourceSizes* aSizes) const
{
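
Several hunks above repeat one pattern: the next sample's decode timecode is estimated as the current timecode plus its duration, now mixing the raw-microsecond mTimecode with the TimeUnit mDuration. A standalone sketch of that estimate with simplified types (the real additions go through CheckedInt64):

#include <cstdint>
#include <cstdio>

// Hypothetical, simplified sample record: mTimecode stays in raw
// microseconds while mDuration is a strong type (a plain wrapper here).
struct DurationSketch { int64_t mUs; };
struct SampleSketch { int64_t mTimecode; DurationSketch mDuration; };

// Mirrors the recurring estimate:
// TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration
static int64_t NextSampleTimecodeUs(const SampleSketch& aSample) {
  return aSample.mTimecode + aSample.mDuration.mUs;
}

int main() {
  SampleSketch s{1000000, {33333}};
  std::printf("next timecode estimate: %lld us\n",
              (long long)NextSampleTimecodeUs(s));
  return 0;
}
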
--- a/dom/media/ogg/OggCodecState.cpp
+++ b/dom/media/ogg/OggCodecState.cpp
@@ -256,17 +256,17 @@ OggCodecState::PacketOutAsMediaRawData()
int64_t end_tstamp = Time(packet->granulepos);
NS_ASSERTION(end_tstamp >= 0, "timestamp invalid");
int64_t duration = PacketDuration(packet.get());
NS_ASSERTION(duration >= 0, "duration invalid");
sample->mTimecode = packet->granulepos;
sample->mTime = end_tstamp - duration;
- sample->mDuration = duration;
+ sample->mDuration = media::TimeUnit::FromMicroseconds(duration);
sample->mKeyframe = IsKeyframe(packet.get());
sample->mEOS = packet->e_o_s;
return sample.forget();
}
nsresult
OggCodecState::PageIn(ogg_page* aPage)
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -71,34 +71,35 @@ BlankVideoDataCreator::Create(MediaRawDa
buffer.mPlanes[2].mWidth = (mFrameWidth + 1) / 2;
buffer.mPlanes[2].mOffset = 0;
buffer.mPlanes[2].mSkip = 0;
return VideoData::CreateAndCopyData(mInfo,
mImageContainer,
aSample->mOffset,
aSample->mTime,
- aSample->mDuration,
+ aSample->mDuration.ToMicroseconds(),
buffer,
aSample->mKeyframe,
aSample->mTime,
mPicture);
}
BlankAudioDataCreator::BlankAudioDataCreator(uint32_t aChannelCount, uint32_t aSampleRate)
: mFrameSum(0), mChannelCount(aChannelCount), mSampleRate(aSampleRate)
{
}
already_AddRefed<MediaData>
BlankAudioDataCreator::Create(MediaRawData* aSample)
{
// Convert duration to frames. We add 1 to duration to account for
// rounding errors, so we get a consistent tone.
- CheckedInt64 frames = UsecsToFrames(aSample->mDuration+1, mSampleRate);
+ CheckedInt64 frames = UsecsToFrames(
+ aSample->mDuration.ToMicroseconds() + 1, mSampleRate);
if (!frames.isValid()
|| !mChannelCount
|| !mSampleRate
|| frames.value() > (UINT32_MAX / mChannelCount)) {
return nullptr;
}
AlignedAudioBuffer samples(frames.value() * mChannelCount);
if (!samples) {
@@ -111,17 +112,17 @@ BlankAudioDataCreator::Create(MediaRawDa
float f = sin(2 * pi * noteHz * mFrameSum / mSampleRate);
for (unsigned c = 0; c < mChannelCount; c++) {
samples[i * mChannelCount + c] = AudioDataValue(f);
}
mFrameSum++;
}
RefPtr<AudioData> data(new AudioData(aSample->mOffset,
aSample->mTime,
- aSample->mDuration,
+ aSample->mDuration.ToMicroseconds(),
uint32_t(frames.value()),
Move(samples),
mChannelCount,
mSampleRate));
return data.forget();
}
already_AddRefed<MediaDataDecoder>
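
The duration-to-frames conversion in BlankAudioDataCreator::Create is ordinary rate math, frames = usecs * rate / 1e6, and the comment's +1 guard keeps truncation from dropping a frame. A sketch under that assumption (the real UsecsToFrames returns a CheckedInt64 to catch overflow; this stand-in is unchecked):

#include <cassert>
#include <cstdint>

// Unchecked stand-in for UsecsToFrames(aUsecs, aRate).
static int64_t UsecsToFramesSketch(int64_t aUsecs, uint32_t aRate) {
  return aUsecs * aRate / 1000000;
}

int main() {
  const uint32_t rate = 44100;
  // 1024 frames at 44.1 kHz last 23219.95... us; a demuxer that truncated
  // the duration to 23219 us would lose a frame without the +1 guard.
  const int64_t durationUs = 23219;
  assert(UsecsToFramesSketch(durationUs, rate) == 1023);      // truncates
  assert(UsecsToFramesSketch(durationUs + 1, rate) == 1024);  // with guard
  return 0;
}
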
--- a/dom/media/platforms/agnostic/NullDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/NullDecoderModule.cpp
@@ -13,17 +13,17 @@ public:
NullVideoDataCreator() {}
already_AddRefed<MediaData> Create(MediaRawData* aSample) override
{
// Create a dummy VideoData with no image. This gives us something to
// send to media streams if necessary.
RefPtr<VideoData> v(new VideoData(aSample->mOffset,
aSample->mTime,
- aSample->mDuration,
+ aSample->mDuration.ToMicroseconds(),
aSample->mKeyframe,
aSample->mTimecode,
gfx::IntSize(),
0));
return v.forget();
}
};
--- a/dom/media/platforms/agnostic/TheoraDecoder.cpp
+++ b/dom/media/platforms/agnostic/TheoraDecoder.cpp
@@ -167,17 +167,17 @@ TheoraDecoder::ProcessDecode(MediaRawDat
VideoInfo info;
info.mDisplay = mInfo.mDisplay;
RefPtr<VideoData> v =
VideoData::CreateAndCopyData(info,
mImageContainer,
aSample->mOffset,
aSample->mTime,
- aSample->mDuration,
+ aSample->mDuration.ToMicroseconds(),
b,
aSample->mKeyframe,
aSample->mTimecode,
mInfo.ScaledImageRect(mTheoraInfo.frame_width,
mTheoraInfo.frame_height));
if (!v) {
LOG(
"Image allocation error source %ux%u display %ux%u picture %ux%u",
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -203,34 +203,34 @@ VPXDecoder::ProcessDecode(MediaRawData*
}
RefPtr<VideoData> v;
if (!img_alpha) {
v = VideoData::CreateAndCopyData(mInfo,
mImageContainer,
aSample->mOffset,
aSample->mTime,
- aSample->mDuration,
+ aSample->mDuration.ToMicroseconds(),
b,
aSample->mKeyframe,
aSample->mTimecode,
mInfo.ScaledImageRect(img->d_w,
img->d_h));
} else {
VideoData::YCbCrBuffer::Plane alpha_plane;
alpha_plane.mData = img_alpha->planes[0];
alpha_plane.mStride = img_alpha->stride[0];
alpha_plane.mHeight = img_alpha->d_h;
alpha_plane.mWidth = img_alpha->d_w;
alpha_plane.mOffset = alpha_plane.mSkip = 0;
v = VideoData::CreateAndCopyData(mInfo,
mImageContainer,
aSample->mOffset,
aSample->mTime,
- aSample->mDuration,
+ aSample->mDuration.ToMicroseconds(),
b,
alpha_plane,
aSample->mKeyframe,
aSample->mTimecode,
mInfo.ScaledImageRect(img->d_w,
img->d_h));
}
--- a/dom/media/platforms/agnostic/eme/DecryptThroughputLimit.h
+++ b/dom/media/platforms/agnostic/eme/DecryptThroughputLimit.h
@@ -41,17 +41,17 @@ public:
// Forget decrypts that happened before the start of our window.
const TimeStamp now = TimeStamp::Now();
while (!mDecrypts.empty() && mDecrypts.front().mTimestamp < now - WindowSize) {
mDecrypts.pop_front();
}
// How much of the media's duration would we have decrypted inside the
// time window if we did decrypt this block?
- TimeDuration sampleDuration = TimeDuration::FromMicroseconds(aSample->mDuration);
+ TimeDuration sampleDuration = aSample->mDuration.ToTimeDuration();
TimeDuration durationDecrypted = sampleDuration;
for (const DecryptedJob& job : mDecrypts) {
durationDecrypted += job.mSampleDuration;
}
if (durationDecrypted < MaxThroughput) {
// If we decrypted a sample of this duration, we would *not* have
// decrypted more than our threshold for max throughput, over the
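
The limiter above is a sliding-window rate limit over media time: drop window-expired entries, sum the durations decrypted inside the window, and compare against a throughput cap. A hedged, standalone sketch of that policy; the constants and names here are illustrative, not the header's actual values:

#include <chrono>
#include <deque>

using Clock = std::chrono::steady_clock;
using Secs = std::chrono::duration<double>;

// Illustrative constants; the real WindowSize and MaxThroughput are
// defined elsewhere in the header and not shown in this hunk.
constexpr Secs kWindow{0.1};
constexpr Secs kMaxThroughput{0.2};

struct DecryptedJob {
  Clock::time_point mTimestamp;
  Secs mSampleDuration;
};

static bool WouldExceedThroughput(std::deque<DecryptedJob>& aJobs,
                                  Secs aSampleDuration) {
  const Clock::time_point now = Clock::now();
  // Forget decrypts that happened before the start of the window.
  while (!aJobs.empty() && aJobs.front().mTimestamp < now - kWindow) {
    aJobs.pop_front();
  }
  // Media duration decrypted inside the window, including this block.
  Secs decrypted = aSampleDuration;
  for (const DecryptedJob& job : aJobs) {
    decrypted += job.mSampleDuration;
  }
  return decrypted >= kMaxThroughput;
}

int main() {
  std::deque<DecryptedJob> jobs;
  // A hypothetical 40 ms sample is under the illustrative cap, so it
  // would be decrypted immediately rather than delayed.
  return WouldExceedThroughput(jobs, Secs{0.04}) ? 1 : 0;
}
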
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
@@ -197,17 +197,17 @@ GMPVideoDecoder::CreateFrame(MediaRawDat
}
frame->SetBufferType(GMP_BufferLength32);
frame->SetEncodedWidth(mConfig.mDisplay.width);
frame->SetEncodedHeight(mConfig.mDisplay.height);
frame->SetTimeStamp(aSample->mTime);
frame->SetCompleteFrame(true);
- frame->SetDuration(aSample->mDuration);
+ frame->SetDuration(aSample->mDuration.ToMicroseconds());
frame->SetFrameType(aSample->mKeyframe ? kGMPKeyFrame : kGMPDeltaFrame);
return frame;
}
const VideoInfo&
GMPVideoDecoder::GetConfig() const
{
--- a/dom/media/platforms/android/RemoteDataDecoder.cpp
+++ b/dom/media/platforms/android/RemoteDataDecoder.cpp
@@ -218,17 +218,18 @@ public:
RefPtr<MediaDataDecoder::DecodePromise> Decode(MediaRawData* aSample) override
{
const VideoInfo* config = aSample->mTrackInfo
? aSample->mTrackInfo->GetAsVideoInfo()
: &mConfig;
MOZ_ASSERT(config);
- InputInfo info(aSample->mDuration, config->mImage, config->mDisplay);
+ InputInfo info(
+ aSample->mDuration.ToMicroseconds(), config->mImage, config->mDisplay);
mInputInfos.Insert(aSample->mTime, info);
return RemoteDataDecoder::Decode(aSample);
}
bool SupportDecoderRecycling() const override
{
return mIsCodecSupportAdaptivePlayback;
}
--- a/dom/media/platforms/apple/AppleATDecoder.cpp
+++ b/dom/media/platforms/apple/AppleATDecoder.cpp
@@ -62,17 +62,18 @@ AppleATDecoder::Init()
return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__);
}
RefPtr<MediaDataDecoder::DecodePromise>
AppleATDecoder::Decode(MediaRawData* aSample)
{
LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio", aSample,
- aSample->mDuration, aSample->mTime, aSample->mKeyframe ? " keyframe" : "",
+ aSample->mDuration.ToMicroseconds(), aSample->mTime,
+ aSample->mKeyframe ? " keyframe" : "",
(unsigned long long)aSample->Size());
RefPtr<AppleATDecoder> self = this;
RefPtr<MediaRawData> sample = aSample;
return InvokeAsync(mTaskQueue, __func__, [self, this, sample] {
return ProcessDecode(sample);
});
}
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -74,17 +74,17 @@ AppleVTDecoder::Init()
}
RefPtr<MediaDataDecoder::DecodePromise>
AppleVTDecoder::Decode(MediaRawData* aSample)
{
LOG("mp4 input sample %p pts %lld duration %lld us%s %" PRIuSIZE " bytes",
aSample,
aSample->mTime,
- aSample->mDuration,
+ aSample->mDuration.ToMicroseconds(),
aSample->mKeyframe ? " keyframe" : "",
aSample->Size());
RefPtr<AppleVTDecoder> self = this;
RefPtr<MediaRawData> sample = aSample;
return InvokeAsync(mTaskQueue, __func__, [self, this, sample] {
RefPtr<DecodePromise> p;
{
@@ -124,17 +124,18 @@ AppleVTDecoder::Shutdown()
}
// Helper to fill in a timestamp structure.
static CMSampleTimingInfo
TimingInfoFromSample(MediaRawData* aSample)
{
CMSampleTimingInfo timestamp;
- timestamp.duration = CMTimeMake(aSample->mDuration, USECS_PER_S);
+ timestamp.duration = CMTimeMake(
+ aSample->mDuration.ToMicroseconds(), USECS_PER_S);
timestamp.presentationTimeStamp =
CMTimeMake(aSample->mTime, USECS_PER_S);
timestamp.decodeTimeStamp =
CMTimeMake(aSample->mTimecode, USECS_PER_S);
return timestamp;
}
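
CMTimeMake(value, timescale) represents value / timescale seconds, so passing microseconds with USECS_PER_S as the timescale preserves the sample's timing exactly. A small arithmetic sketch of that equivalence (plain C++, not CoreMedia):

#include <cassert>
#include <cstdint>

constexpr int64_t USECS_PER_S = 1000000;

// Models the value CMTimeMake(aValue, aTimescale) stands for.
static double CMTimeSketchSeconds(int64_t aValue, int64_t aTimescale) {
  return double(aValue) / double(aTimescale);
}

int main() {
  const int64_t durationUs = 33333;  // hypothetical ~30 fps frame
  assert(CMTimeSketchSeconds(durationUs, USECS_PER_S) == 33333 / 1e6);
  return 0;
}
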
--- a/dom/media/platforms/apple/AppleVTDecoder.h
+++ b/dom/media/platforms/apple/AppleVTDecoder.h
@@ -29,17 +29,17 @@ public:
media::TimeUnit composition_timestamp;
media::TimeUnit duration;
int64_t byte_offset;
bool is_sync_point;
explicit AppleFrameRef(const MediaRawData& aSample)
: decode_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTimecode))
, composition_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTime))
- , duration(media::TimeUnit::FromMicroseconds(aSample.mDuration))
+ , duration(aSample.mDuration)
, byte_offset(aSample.mOffset)
, is_sync_point(aSample.mKeyframe)
{
}
};
RefPtr<InitPromise> Init() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -236,17 +236,17 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
packet.pos = aSample->mOffset;
// LibAV provides no API to retrieve the decoded sample's duration.
// (FFmpeg >= 1.0 provides av_frame_get_pkt_duration)
// As such we instead use a map using the dts as key that we will retrieve
// later.
// The map will typically hold about 16 entries.
- mDurationMap.Insert(aSample->mTimecode, aSample->mDuration);
+ mDurationMap.Insert(aSample->mTimecode, aSample->mDuration.ToMicroseconds());
if (!PrepareFrame()) {
NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
}
// Required with old version of FFmpeg/LibAV
mFrame->reordered_opaque = AV_NOPTS_VALUE;
@@ -277,17 +277,17 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts);
// Retrieve duration from dts.
// We use the first entry found matching this dts (this is done to
// handle damaged file with multiple frames with the same dts)
int64_t duration;
if (!mDurationMap.Find(mFrame->pkt_dts, duration)) {
NS_WARNING("Unable to retrieve duration from map");
- duration = aSample->mDuration;
+ duration = aSample->mDuration.ToMicroseconds();
// dts are probably incorrectly reported, so clear the map as we're
// unlikely to find them in the future anyway. This also guards
// against the map becoming extremely big.
mDurationMap.Clear();
}
FFMPEG_LOG(
"Got one frame output with pts=%" PRId64 " dts=%" PRId64
" duration=%" PRId64 " opaque=%" PRId64,
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -667,17 +667,17 @@ WMFVideoMFTManager::Input(MediaRawData*
RefPtr<IMFSample> inputSample;
HRESULT hr = mDecoder->CreateInputSample(aSample->Data(),
uint32_t(aSample->Size()),
aSample->mTime,
&inputSample);
NS_ENSURE_TRUE(SUCCEEDED(hr) && inputSample != nullptr, hr);
- mLastDuration = aSample->mDuration;
+ mLastDuration = aSample->mDuration.ToMicroseconds();
mLastTime = aSample->mTime;
mSamplesCount++;
// Forward sample data to the decoder.
return mDecoder->Input(inputSample);
}
class SupportsConfigEvent : public Runnable {
@@ -1028,17 +1028,17 @@ WMFVideoMFTManager::Output(int64_t aStre
// Frame should be non-null only when we succeeded.
MOZ_ASSERT((frame != nullptr) == SUCCEEDED(hr));
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
NS_ENSURE_TRUE(frame, E_FAIL);
aOutData = frame;
// Set the potentially corrected pts and duration.
aOutData->mTime = pts.ToMicroseconds();
- aOutData->mDuration = duration.ToMicroseconds();
+ aOutData->mDuration = duration;
if (mNullOutputCount) {
mGotValidOutputAfterNullOutput = true;
}
return S_OK;
}
--- a/dom/media/wave/WaveDemuxer.cpp
+++ b/dom/media/wave/WaveDemuxer.cpp
@@ -529,27 +529,27 @@ WAVTrackDemuxer::GetNextChunk(const Medi
UpdateState(aRange);
++mNumParsedChunks;
++mChunkIndex;
datachunk->mTime = Duration(mChunkIndex - 1).ToMicroseconds();
if (static_cast<uint32_t>(mChunkIndex) * DATA_CHUNK_SIZE < mDataLength) {
- datachunk->mDuration = Duration(1).ToMicroseconds();
+ datachunk->mDuration = Duration(1);
} else {
uint32_t mBytesRemaining =
mDataLength - mChunkIndex * DATA_CHUNK_SIZE;
- datachunk->mDuration = DurationFromBytes(mBytesRemaining).ToMicroseconds();
+ datachunk->mDuration = DurationFromBytes(mBytesRemaining);
}
datachunk->mTimecode = datachunk->mTime;
datachunk->mKeyframe = true;
MOZ_ASSERT(datachunk->mTime >= 0);
- MOZ_ASSERT(datachunk->mDuration >= 0);
+ MOZ_ASSERT(!datachunk->mDuration.IsNegative());
return datachunk.forget();
}
already_AddRefed<MediaRawData>
WAVTrackDemuxer::GetFileHeader(const MediaByteRange& aRange)
{
if (!aRange.Length()) {
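
GetNextChunk sizes the final chunk's duration from its remaining byte count rather than the fixed per-chunk duration. For PCM WAV, bytes map to time through the byte rate; a sketch under that standard assumption (the real DurationFromBytes is not shown in this patch and may differ in detail):

#include <cassert>
#include <cstdint>

// Standard PCM math, assumed for illustration:
// byteRate = rate * channels * bytesPerSample.
static int64_t DurationFromBytesUs(uint32_t aBytes, uint32_t aRate,
                                   uint32_t aChannels,
                                   uint32_t aBytesPerSample) {
  const uint64_t byteRate = uint64_t(aRate) * aChannels * aBytesPerSample;
  return int64_t(uint64_t(aBytes) * 1000000 / byteRate);
}

int main() {
  // 44.1 kHz stereo 16-bit PCM: 176400 bytes per second.
  assert(DurationFromBytesUs(176400, 44100, 2, 2) == 1000000);
  return 0;
}
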
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -719,17 +719,17 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
sample = new MediaRawData(data, length);
if (length && !sample->Data()) {
// OOM.
return NS_ERROR_OUT_OF_MEMORY;
}
}
sample->mTimecode = tstamp;
sample->mTime = tstamp;
- sample->mDuration = next_tstamp - tstamp;
+ sample->mDuration = media::TimeUnit::FromMicroseconds(next_tstamp - tstamp);
sample->mOffset = holder->Offset();
sample->mKeyframe = isKeyframe;
if (discardPadding && i == count - 1) {
CheckedInt64 discardFrames;
if (discardPadding < 0) {
// This is an invalid value as discard padding should never be negative.
// Set to maximum value so that the decoder will reject it as it's
// greater than the number of frames available.
--- a/media/libstagefright/binding/Index.cpp
+++ b/media/libstagefright/binding/Index.cpp
@@ -97,17 +97,17 @@ already_AddRefed<MediaRawData> SampleIte
if (s->mByteRange.mEnd > length) {
// We don't have this complete sample.
return nullptr;
}
RefPtr<MediaRawData> sample = new MediaRawData();
sample->mTimecode = s->mDecodeTime;
sample->mTime = s->mCompositionRange.start;
- sample->mDuration = s->mCompositionRange.Length();
+ sample->mDuration = TimeUnit::FromMicroseconds(s->mCompositionRange.Length());
sample->mOffset = s->mByteRange.mStart;
sample->mKeyframe = s->mSync;
nsAutoPtr<MediaRawDataWriter> writer(sample->CreateWriter());
// Do the blocking read
if (!writer->SetSize(s->mByteRange.Length())) {
return nullptr;
}