--- a/dom/media/AudioCompactor.h
+++ b/dom/media/AudioCompactor.h
@@ -38,16 +38,18 @@ public:
// buffer given its length (in AudioDataValue elements). The number of frames
// copied must be returned. This copy functor must support being called
// multiple times in order to copy the audio data fully. The copy functor
// must copy full frames, as partial frames will be ignored.
template<typename CopyFunc>
bool Push(int64_t aOffset, int64_t aTime, int32_t aSampleRate,
uint32_t aFrames, uint32_t aChannels, CopyFunc aCopyFunc)
{
+ auto time = media::TimeUnit::FromMicroseconds(aTime);
+
// If we are losing more than a reasonable amount to padding, try to chunk
// the data.
size_t maxSlop = AudioDataSize(aFrames, aChannels) / MAX_SLOP_DIVISOR;
while (aFrames > 0) {
uint32_t samples = GetChunkSamples(aFrames, aChannels, maxSlop);
if (samples / aChannels > mSamplesPadding / aChannels + 1) {
samples -= mSamplesPadding;
@@ -58,32 +60,32 @@ public:
}
// Copy audio data to buffer using caller-provided functor.
uint32_t framesCopied = aCopyFunc(buffer.get(), samples);
NS_ASSERTION(framesCopied <= aFrames, "functor copied too many frames");
buffer.SetLength(size_t(framesCopied) * aChannels);
- CheckedInt64 duration = FramesToUsecs(framesCopied, aSampleRate);
- if (!duration.isValid()) {
+ auto duration = FramesToTimeUnit(framesCopied, aSampleRate);
+ if (!duration.IsValid()) {
return false;
}
mQueue.Push(new AudioData(aOffset,
- aTime,
- duration.value(),
+ time,
+ duration,
framesCopied,
Move(buffer),
aChannels,
aSampleRate));
// Remove the frames we just pushed into the queue and loop if there is
// more to be done.
- aTime += duration.value();
+ time += duration;
aFrames -= framesCopied;
// NOTE: No need to update aOffset as it's only an approximation anyway.
}
return true;
}
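The recurring change in this patch is mechanical: a CheckedInt64 of raw microseconds from FramesToUsecs becomes a media::TimeUnit from FramesToTimeUnit, which carries its own validity bit. A simplified sketch of that conversion, assuming the TimeUnit API from TimeUnits.h (FromMicroseconds, Invalid, IsValid); the real FramesToTimeUnit in VideoUtils.h is more careful about precision than this:

    // Sketch only: turn a frame count at a sample rate into a checked
    // media::TimeUnit instead of a bare microsecond count.
    media::TimeUnit SketchFramesToTimeUnit(int64_t aFrames, uint32_t aRate)
    {
      if (!aRate) {
        return media::TimeUnit::Invalid();   // avoid division by zero
      }
      CheckedInt64 usecs = CheckedInt64(aFrames) * USECS_PER_S;
      if (!usecs.isValid()) {
        return media::TimeUnit::Invalid();   // multiplication overflowed
      }
      return media::TimeUnit::FromMicroseconds(usecs.value() / aRate);
    }

Call sites then test duration.IsValid() once, as Push() does above; TimeUnit arithmetic is itself checked, so invalidity propagates through later operations such as time += duration.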
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -68,18 +68,18 @@ AudioData::IsAudible() const
}
}
return false;
}
/* static */
already_AddRefed<AudioData>
AudioData::TransferAndUpdateTimestampAndDuration(AudioData* aOther,
- int64_t aTimestamp,
- int64_t aDuration)
+ const TimeUnit& aTimestamp,
+ const TimeUnit& aDuration)
{
NS_ENSURE_TRUE(aOther, nullptr);
RefPtr<AudioData> v = new AudioData(aOther->mOffset,
aTimestamp,
aDuration,
aOther->mFrames,
Move(aOther->mAudioData),
aOther->mChannels,
@@ -158,30 +158,30 @@ IsInEmulator()
char propQemu[PROPERTY_VALUE_MAX];
property_get("ro.kernel.qemu", propQemu, "");
return !strncmp(propQemu, "1", 1);
}
#endif
VideoData::VideoData(int64_t aOffset,
- int64_t aTime,
- int64_t aDuration,
+ const TimeUnit& aTime,
+ const TimeUnit& aDuration,
bool aKeyframe,
- int64_t aTimecode,
+ const TimeUnit& aTimecode,
IntSize aDisplay,
layers::ImageContainer::FrameID aFrameID)
: MediaData(VIDEO_DATA, aOffset, aTime, aDuration, 1)
, mDisplay(aDisplay)
, mFrameID(aFrameID)
, mSentToCompositor(false)
{
MOZ_ASSERT(!mDuration.IsNegative(), "Frame must have non-negative duration.");
mKeyframe = aKeyframe;
- mTimecode = TimeUnit::FromMicroseconds(aTimecode);
+ mTimecode = aTimecode;
}
VideoData::~VideoData()
{
}
void
VideoData::SetListener(UniquePtr<Listener> aListener)
@@ -280,43 +280,43 @@ bool VideoData::SetVideoDataToImage(Plan
}
}
/* static */
already_AddRefed<VideoData>
VideoData::CreateAndCopyData(const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
- int64_t aTime,
+ const TimeUnit& aTime,
const TimeUnit& aDuration,
const YCbCrBuffer& aBuffer,
bool aKeyframe,
- int64_t aTimecode,
+ const TimeUnit& aTimecode,
const IntRect& aPicture)
{
if (!aContainer) {
// Create a dummy VideoData with no image. This gives us something to
// send to media streams if necessary.
RefPtr<VideoData> v(new VideoData(aOffset,
aTime,
- aDuration.ToMicroseconds(),
+ aDuration,
aKeyframe,
aTimecode,
aInfo.mDisplay,
0));
return v.forget();
}
if (!ValidateBufferAndPicture(aBuffer, aPicture)) {
return nullptr;
}
RefPtr<VideoData> v(new VideoData(aOffset,
aTime,
- aDuration.ToMicroseconds(),
+ aDuration,
aKeyframe,
aTimecode,
aInfo.mDisplay,
0));
#ifdef MOZ_WIDGET_GONK
const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];
@@ -364,44 +364,44 @@ VideoData::CreateAndCopyData(const Video
}
/* static */
already_AddRefed<VideoData>
VideoData::CreateAndCopyData(const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
- int64_t aTime,
+ const TimeUnit& aTime,
const TimeUnit& aDuration,
const YCbCrBuffer& aBuffer,
const YCbCrBuffer::Plane &aAlphaPlane,
bool aKeyframe,
- int64_t aTimecode,
+ const TimeUnit& aTimecode,
const IntRect& aPicture)
{
if (!aContainer) {
// Create a dummy VideoData with no image. This gives us something to
// send to media streams if necessary.
RefPtr<VideoData> v(new VideoData(aOffset,
aTime,
- aDuration.ToMicroseconds(),
+ aDuration,
aKeyframe,
aTimecode,
aInfo.mDisplay,
0));
return v.forget();
}
if (!ValidateBufferAndPicture(aBuffer, aPicture)) {
return nullptr;
}
RefPtr<VideoData> v(new VideoData(aOffset,
aTime,
- aDuration.ToMicroseconds(),
+ aDuration,
aKeyframe,
aTimecode,
aInfo.mDisplay,
0));
// Convert from YUVA to BGRA format on the software side.
RefPtr<layers::SharedRGBImage> videoImage =
aContainer->CreateSharedRGBImage();
@@ -430,25 +430,25 @@ VideoData::CreateAndCopyData(const Video
return v.forget();
}
/* static */
already_AddRefed<VideoData>
VideoData::CreateFromImage(const IntSize& aDisplay,
int64_t aOffset,
- int64_t aTime,
+ const TimeUnit& aTime,
const TimeUnit& aDuration,
const RefPtr<Image>& aImage,
bool aKeyframe,
- int64_t aTimecode)
+ const TimeUnit& aTimecode)
{
RefPtr<VideoData> v(new VideoData(aOffset,
aTime,
- aDuration.ToMicroseconds(),
+ aDuration,
aKeyframe,
aTimecode,
aDisplay,
0));
v->mImage = aImage;
return v.forget();
}
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -284,24 +284,24 @@ public:
AUDIO_DATA = 0,
VIDEO_DATA,
RAW_DATA,
NULL_DATA
};
MediaData(Type aType,
int64_t aOffset,
- int64_t aTimestamp,
- int64_t aDuration,
+ const media::TimeUnit& aTimestamp,
+ const media::TimeUnit& aDuration,
uint32_t aFrames)
: mType(aType)
, mOffset(aOffset)
- , mTime(media::TimeUnit::FromMicroseconds(aTimestamp))
- , mTimecode(media::TimeUnit::FromMicroseconds(aTimestamp))
- , mDuration(media::TimeUnit::FromMicroseconds(aDuration))
+ , mTime(aTimestamp)
+ , mTimecode(aTimestamp)
+ , mDuration(aDuration)
, mFrames(aFrames)
, mKeyframe(false)
{
}
// Type of contained data.
const Type mType;
@@ -361,32 +361,34 @@ protected:
};
// NullData is for a decoder generating a sample which doesn't need to be
// rendered.
class NullData : public MediaData
{
public:
- NullData(int64_t aOffset, int64_t aTime, int64_t aDuration)
+ NullData(int64_t aOffset,
+ const media::TimeUnit& aTime,
+ const media::TimeUnit& aDuration)
: MediaData(NULL_DATA, aOffset, aTime, aDuration, 0)
{
}
static const Type sType = NULL_DATA;
};
// Holds a chunk of decoded audio frames.
class AudioData : public MediaData
{
public:
AudioData(int64_t aOffset,
- int64_t aTime,
- int64_t aDuration,
+ const media::TimeUnit& aTime,
+ const media::TimeUnit& aDuration,
uint32_t aFrames,
AlignedAudioBuffer&& aData,
uint32_t aChannels,
uint32_t aRate)
: MediaData(sType, aOffset, aTime, aDuration, aFrames)
, mChannels(aChannels)
, mRate(aRate)
, mAudioData(Move(aData))
@@ -397,18 +399,18 @@ public:
static const char* sTypeName;
// Creates a new AudioData identical to aOther, but with a different
// specified timestamp and duration. All data from aOther is copied
// into the new AudioData except for the audio data, which is transferred.
// After such a call, the original aOther is unusable.
static already_AddRefed<AudioData>
TransferAndUpdateTimestampAndDuration(AudioData* aOther,
- int64_t aTimestamp,
- int64_t aDuration);
+ const media::TimeUnit& aTimestamp,
+ const media::TimeUnit& aDuration);
size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
// If mAudioBuffer is null, creates it from mAudioData.
void EnsureAudioBuffer();
// To check whether mAudioData has an audible signal; used to distinguish
// audible data from silent data.
@@ -484,53 +486,53 @@ public:
// Creates a new VideoData containing a deep copy of aBuffer. May use
// aContainer to allocate an Image to hold the copied data.
static already_AddRefed<VideoData> CreateAndCopyData(
const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
- int64_t aTime,
+ const media::TimeUnit& aTime,
const media::TimeUnit& aDuration,
const YCbCrBuffer& aBuffer,
bool aKeyframe,
- int64_t aTimecode,
+ const media::TimeUnit& aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyData(
const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
- int64_t aTime,
+ const media::TimeUnit& aTime,
const media::TimeUnit& aDuration,
const YCbCrBuffer& aBuffer,
const YCbCrBuffer::Plane& aAlphaPlane,
bool aKeyframe,
- int64_t aTimecode,
+ const media::TimeUnit& aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyIntoTextureClient(
const VideoInfo& aInfo,
int64_t aOffset,
- int64_t aTime,
+ const media::TimeUnit& aTime,
const media::TimeUnit& aDuration,
layers::TextureClient* aBuffer,
bool aKeyframe,
- int64_t aTimecode,
+ const media::TimeUnit& aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateFromImage(
const IntSize& aDisplay,
int64_t aOffset,
- int64_t aTime,
+ const media::TimeUnit& aTime,
const media::TimeUnit& aDuration,
const RefPtr<Image>& aImage,
bool aKeyframe,
- int64_t aTimecode);
+ const media::TimeUnit& aTimecode);
// Initializes the PlanarYCbCrImage. The video data is copied into the
// PlanarYCbCrImage only when aCopyData is true.
static bool SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
const VideoInfo& aInfo,
const YCbCrBuffer& aBuffer,
const IntRect& aPicture,
bool aCopyData);
@@ -543,20 +545,20 @@ public:
const IntSize mDisplay;
// This frame's image.
RefPtr<Image> mImage;
int32_t mFrameID;
VideoData(int64_t aOffset,
- int64_t aTime,
- int64_t aDuration,
+ const media::TimeUnit& aTime,
+ const media::TimeUnit& aDuration,
bool aKeyframe,
- int64_t aTimecode,
+ const media::TimeUnit& aTimecode,
IntSize aDisplay,
uint32_t aFrameID);
void SetListener(UniquePtr<Listener> aListener);
void MarkSentToCompositor();
bool IsSentToCompositor() { return mSentToCompositor; }
void UpdateDuration(const media::TimeUnit& aDuration);
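With these signatures, callers construct samples from TimeUnit values directly rather than microsecond integers. A hypothetical call site, with all numbers invented for illustration:

    using media::TimeUnit;

    // Hypothetical: 1024 stereo frames at 48 kHz, starting at 40 ms.
    AlignedAudioBuffer samples(1024 * 2);
    RefPtr<AudioData> audio =
      new AudioData(/* aOffset */ 0,
                    /* aTime */ TimeUnit::FromMicroseconds(40000),
                    /* aDuration */ FramesToTimeUnit(1024, 48000),
                    /* aFrames */ 1024,
                    Move(samples),
                    /* aChannels */ 2,
                    /* aRate */ 48000);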
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -1385,18 +1385,18 @@ private:
memcpy(audioData.get(),
aAudio->mAudioData.get() + (framesToPrune.value() * channels),
frames * channels * sizeof(AudioDataValue));
auto duration = FramesToTimeUnit(frames, Info().mAudio.mRate);
if (!duration.IsValid()) {
return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
}
RefPtr<AudioData> data(new AudioData(
- aAudio->mOffset, mSeekJob.mTarget->GetTime().ToMicroseconds(),
- duration.ToMicroseconds(), frames, Move(audioData), channels,
+ aAudio->mOffset, mSeekJob.mTarget->GetTime(),
+ duration, frames, Move(audioData), channels,
aAudio->mRate));
MOZ_ASSERT(AudioQueue().GetSize() == 0,
"Should be the 1st sample after seeking");
mMaster->PushAudio(data);
mDoneAudioSeeking = true;
return NS_OK;
}
--- a/dom/media/android/AndroidMediaReader.cpp
+++ b/dom/media/android/AndroidMediaReader.cpp
@@ -171,21 +171,21 @@ bool AndroidMediaReader::DecodeVideoFram
currentImage = bufferCallback.GetImage();
int64_t pos = mDecoder->GetResource()->Tell();
IntRect picture = mPicture;
RefPtr<VideoData> v;
if (currentImage) {
v = VideoData::CreateFromImage(mInfo.mVideo.mDisplay,
pos,
- frame.mTimeUs,
+ TimeUnit::FromMicroseconds(frame.mTimeUs),
TimeUnit::FromMicroseconds(1), // We don't know the duration yet.
currentImage,
frame.mKeyFrame,
- -1);
+ TimeUnit::FromMicroseconds(-1));
} else {
// Assume YUV
VideoData::YCbCrBuffer b;
b.mPlanes[0].mData = static_cast<uint8_t *>(frame.Y.mData);
b.mPlanes[0].mStride = frame.Y.mStride;
b.mPlanes[0].mHeight = frame.Y.mHeight;
b.mPlanes[0].mWidth = frame.Y.mWidth;
b.mPlanes[0].mOffset = frame.Y.mOffset;
@@ -216,21 +216,21 @@ bool AndroidMediaReader::DecodeVideoFram
picture.width = (frame.Y.mWidth * mPicture.width) / mInitialFrame.width;
picture.height = (frame.Y.mHeight * mPicture.height) / mInitialFrame.height;
}
// This is the approximate byte position in the stream.
v = VideoData::CreateAndCopyData(mInfo.mVideo,
mDecoder->GetImageContainer(),
pos,
- frame.mTimeUs,
+ TimeUnit::FromMicroseconds(frame.mTimeUs),
TimeUnit::FromMicroseconds(1), // We don't know the duration yet.
b,
frame.mKeyFrame,
- -1,
+ TimeUnit::FromMicroseconds(-1),
picture);
}
if (!v) {
return false;
}
a.mStats.mParsedFrames++;
a.mStats.mDecodedFrames++;
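The -1 passed here is the pre-existing "timecode unknown" sentinel; wrapping it as TimeUnit::FromMicroseconds(-1) preserves the sentinel rather than fabricating a real timestamp. A consumer honoring it might look like the following hypothetical helper (not part of this patch):

    // Hypothetical: prefer the container timecode when known, otherwise
    // fall back to the sample's presentation time.
    static media::TimeUnit EffectiveTimecode(const VideoData* aSample)
    {
      return aSample->mTimecode.IsNegative() ? aSample->mTime
                                             : aSample->mTimecode;
    }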
--- a/dom/media/gmp/ChromiumCDMParent.cpp
+++ b/dom/media/gmp/ChromiumCDMParent.cpp
@@ -635,21 +635,21 @@ ChromiumCDMParent::RecvDecoded(const CDM
b.mPlanes[2].mOffset = aFrame.mVPlane().mPlaneOffset();
b.mPlanes[2].mSkip = 0;
gfx::IntRect pictureRegion(0, 0, aFrame.mImageWidth(), aFrame.mImageHeight());
RefPtr<VideoData> v = VideoData::CreateAndCopyData(
mVideoInfo,
mImageContainer,
mLastStreamOffset,
- aFrame.mTimestamp(),
+ media::TimeUnit::FromMicroseconds(aFrame.mTimestamp()),
media::TimeUnit::FromMicroseconds(aFrame.mDuration()),
b,
false,
- -1,
+ media::TimeUnit::FromMicroseconds(-1),
pictureRegion);
// Return the shmem to the CDM so the shmem can be reused to send us
// another frame.
if (!SendGiveBuffer(aFrame.mData())) {
mDecodePromise.RejectIfExists(
MediaResult(NS_ERROR_OUT_OF_MEMORY,
RESULT_DETAIL("Can't return shmem to CDM process")),
--- a/dom/media/ipc/VideoDecoderChild.cpp
+++ b/dom/media/ipc/VideoDecoderChild.cpp
@@ -43,21 +43,21 @@ VideoDecoderChild::RecvOutput(const Vide
// The Image here creates a TextureData object that takes ownership
// of the SurfaceDescriptor, and is responsible for making sure that
// it gets deallocated.
RefPtr<Image> image = new GPUVideoImage(GetManager(), aData.sd(), aData.frameSize());
RefPtr<VideoData> video = VideoData::CreateFromImage(
aData.display(),
aData.base().offset(),
- aData.base().time(),
+ media::TimeUnit::FromMicroseconds(aData.base().time()),
media::TimeUnit::FromMicroseconds(aData.base().duration()),
image,
aData.base().keyframe(),
- aData.base().timecode());
+ media::TimeUnit::FromMicroseconds(aData.base().timecode()));
mDecodedData.AppendElement(Move(video));
return IPC_OK();
}
mozilla::ipc::IPCResult
VideoDecoderChild::RecvInputExhausted()
{
--- a/dom/media/mediasink/AudioSink.cpp
+++ b/dom/media/mediasink/AudioSink.cpp
@@ -489,26 +489,26 @@ AudioSink::PushProcessedAudio(AudioData*
already_AddRefed<AudioData>
AudioSink::CreateAudioFromBuffer(AlignedAudioBuffer&& aBuffer,
AudioData* aReference)
{
uint32_t frames = aBuffer.Length() / mOutputChannels;
if (!frames) {
return nullptr;
}
- CheckedInt64 duration = FramesToUsecs(frames, mOutputRate);
- if (!duration.isValid()) {
+ auto duration = FramesToTimeUnit(frames, mOutputRate);
+ if (!duration.IsValid()) {
NS_WARNING("Int overflow in AudioSink");
mErrored = true;
return nullptr;
}
RefPtr<AudioData> data =
new AudioData(aReference->mOffset,
- aReference->mTime.ToMicroseconds(),
- duration.value(),
+ aReference->mTime,
+ duration,
frames,
Move(aBuffer),
mOutputChannels,
mOutputRate);
return data.forget();
}
uint32_t
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -70,21 +70,21 @@ BlankVideoDataCreator::Create(MediaRawDa
buffer.mPlanes[2].mHeight = (mFrameHeight + 1) / 2;
buffer.mPlanes[2].mWidth = (mFrameWidth + 1) / 2;
buffer.mPlanes[2].mOffset = 0;
buffer.mPlanes[2].mSkip = 0;
return VideoData::CreateAndCopyData(mInfo,
mImageContainer,
aSample->mOffset,
- aSample->mTime.ToMicroseconds(),
+ aSample->mTime,
aSample->mDuration,
buffer,
aSample->mKeyframe,
- aSample->mTime.ToMicroseconds(),
+ aSample->mTime,
mPicture);
}
BlankAudioDataCreator::BlankAudioDataCreator(uint32_t aChannelCount, uint32_t aSampleRate)
: mFrameSum(0), mChannelCount(aChannelCount), mSampleRate(aSampleRate)
{
}
@@ -111,18 +111,18 @@ BlankAudioDataCreator::Create(MediaRawDa
for (int i = 0; i < frames.value(); i++) {
float f = sin(2 * pi * noteHz * mFrameSum / mSampleRate);
for (unsigned c = 0; c < mChannelCount; c++) {
samples[i * mChannelCount + c] = AudioDataValue(f);
}
mFrameSum++;
}
RefPtr<AudioData> data(new AudioData(aSample->mOffset,
- aSample->mTime.ToMicroseconds(),
- aSample->mDuration.ToMicroseconds(),
+ aSample->mTime,
+ aSample->mDuration,
uint32_t(frames.value()),
Move(samples),
mChannelCount,
mSampleRate));
return data.forget();
}
already_AddRefed<MediaDataDecoder>
--- a/dom/media/platforms/agnostic/NullDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/NullDecoderModule.cpp
@@ -12,20 +12,20 @@ class NullVideoDataCreator : public Dumm
public:
NullVideoDataCreator() {}
already_AddRefed<MediaData> Create(MediaRawData* aSample) override
{
// Create a dummy VideoData with no image. This gives us something to
// send to media streams if necessary.
RefPtr<VideoData> v(new VideoData(aSample->mOffset,
- aSample->mTime.ToMicroseconds(),
- aSample->mDuration.ToMicroseconds(),
+ aSample->mTime,
+ aSample->mDuration,
aSample->mKeyframe,
- aSample->mTimecode.ToMicroseconds(),
+ aSample->mTimecode,
gfx::IntSize(),
0));
return v.forget();
}
};
class NullDecoderModule : public PlatformDecoderModule {
public:
--- a/dom/media/platforms/agnostic/OpusDecoder.cpp
+++ b/dom/media/platforms/agnostic/OpusDecoder.cpp
@@ -227,28 +227,28 @@ OpusDataDecoder::ProcessDecode(MediaRawD
#endif
if (ret < 0) {
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("Opus decoding error:%d", ret)),
__func__);
}
NS_ASSERTION(ret == frames, "Opus decoded too few audio samples");
- CheckedInt64 startTime = aSample->mTime.ToMicroseconds();
+ auto startTime = aSample->mTime;
// Trim the initial frames while the decoder is settling.
if (mSkip > 0) {
int32_t skipFrames = std::min<int32_t>(mSkip, frames);
int32_t keepFrames = frames - skipFrames;
OPUS_DEBUG(
"Opus decoder skipping %d of %d frames", skipFrames, frames);
PodMove(buffer.get(),
buffer.get() + skipFrames * channels,
keepFrames * channels);
- startTime = startTime + FramesToUsecs(skipFrames, mOpusParser->mRate);
+ startTime = startTime + FramesToTimeUnit(skipFrames, mOpusParser->mRate);
frames = keepFrames;
mSkip -= skipFrames;
}
if (aSample->mDiscardPadding > 0) {
OPUS_DEBUG("Opus decoder discarding %u of %d frames",
aSample->mDiscardPadding, frames);
// Padding discard is only supposed to happen on the final packet.
@@ -282,38 +282,38 @@ OpusDataDecoder::ProcessDecode(MediaRawD
uint32_t samples = frames * channels;
for (uint32_t i = 0; i < samples; i++) {
int32_t val = static_cast<int32_t>((gain_Q16*buffer[i] + 32768)>>16);
buffer[i] = static_cast<AudioDataValue>(MOZ_CLIP_TO_15(val));
}
}
#endif
- CheckedInt64 duration = FramesToUsecs(frames, mOpusParser->mRate);
- if (!duration.isValid()) {
+ auto duration = FramesToTimeUnit(frames, mOpusParser->mRate);
+ if (!duration.IsValid()) {
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow converting WebM audio duration")),
__func__);
}
- CheckedInt64 time = startTime -
- FramesToUsecs(mOpusParser->mPreSkip, mOpusParser->mRate) +
- FramesToUsecs(mFrames, mOpusParser->mRate);
- if (!time.isValid()) {
+ auto time = startTime -
+ FramesToTimeUnit(mOpusParser->mPreSkip, mOpusParser->mRate) +
+ FramesToTimeUnit(mFrames, mOpusParser->mRate);
+ if (!time.IsValid()) {
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow shifting tstamp by codec delay")),
__func__);
};
mFrames += frames;
return DecodePromise::CreateAndResolve(
- DecodedData{ new AudioData(aSample->mOffset, time.value(), duration.value(),
+ DecodedData{ new AudioData(aSample->mOffset, time, duration,
frames, Move(buffer), mOpusParser->mChannels,
mOpusParser->mRate) },
__func__);
}
RefPtr<MediaDataDecoder::DecodePromise>
OpusDataDecoder::Drain()
{
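Since each TimeUnit operand above is internally checked, the chained subtraction and addition need only the single IsValid() test at the end; an invalid intermediate poisons the final result. With concrete, purely illustrative numbers for a 48 kHz stream:

    // Illustrative only; opusenc's default pre-skip is 312 frames at 48 kHz.
    auto startTime = media::TimeUnit::FromMicroseconds(500000); // packet time
    auto preSkip = FramesToTimeUnit(312, 48000); // 6500 us of priming frames
    auto played  = FramesToTimeUnit(960, 48000); // 20000 us already decoded
    auto time = startTime - preSkip + played;    // 513500 us
    MOZ_ASSERT(time.IsValid());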
--- a/dom/media/platforms/agnostic/TheoraDecoder.cpp
+++ b/dom/media/platforms/agnostic/TheoraDecoder.cpp
@@ -167,21 +167,21 @@ TheoraDecoder::ProcessDecode(MediaRawDat
mTheoraInfo.pic_width, mTheoraInfo.pic_height);
VideoInfo info;
info.mDisplay = mInfo.mDisplay;
RefPtr<VideoData> v =
VideoData::CreateAndCopyData(info,
mImageContainer,
aSample->mOffset,
- aSample->mTime.ToMicroseconds(),
+ aSample->mTime,
aSample->mDuration,
b,
aSample->mKeyframe,
- aSample->mTimecode.ToMicroseconds(),
+ aSample->mTimecode,
mInfo.ScaledImageRect(mTheoraInfo.frame_width,
mTheoraInfo.frame_height));
if (!v) {
LOG(
"Image allocation error source %ux%u display %ux%u picture %ux%u",
mTheoraInfo.frame_width,
mTheoraInfo.frame_height,
mInfo.mDisplay.width,
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -202,39 +202,39 @@ VPXDecoder::ProcessDecode(MediaRawData*
__func__);
}
RefPtr<VideoData> v;
if (!img_alpha) {
v = VideoData::CreateAndCopyData(mInfo,
mImageContainer,
aSample->mOffset,
- aSample->mTime.ToMicroseconds(),
+ aSample->mTime,
aSample->mDuration,
b,
aSample->mKeyframe,
- aSample->mTimecode.ToMicroseconds(),
+ aSample->mTimecode,
mInfo.ScaledImageRect(img->d_w,
img->d_h));
} else {
VideoData::YCbCrBuffer::Plane alpha_plane;
alpha_plane.mData = img_alpha->planes[0];
alpha_plane.mStride = img_alpha->stride[0];
alpha_plane.mHeight = img_alpha->d_h;
alpha_plane.mWidth = img_alpha->d_w;
alpha_plane.mOffset = alpha_plane.mSkip = 0;
v = VideoData::CreateAndCopyData(mInfo,
mImageContainer,
aSample->mOffset,
- aSample->mTime.ToMicroseconds(),
+ aSample->mTime,
aSample->mDuration,
b,
alpha_plane,
aSample->mKeyframe,
- aSample->mTimecode.ToMicroseconds(),
+ aSample->mTimecode,
mInfo.ScaledImageRect(img->d_w,
img->d_h));
}
if (!v) {
LOG(
"Image allocation error source %ux%u display %ux%u picture %ux%u",
--- a/dom/media/platforms/agnostic/VorbisDecoder.cpp
+++ b/dom/media/platforms/agnostic/VorbisDecoder.cpp
@@ -136,17 +136,17 @@ VorbisDataDecoder::Decode(MediaRawData*
RefPtr<MediaDataDecoder::DecodePromise>
VorbisDataDecoder::ProcessDecode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
const unsigned char* aData = aSample->Data();
size_t aLength = aSample->Size();
int64_t aOffset = aSample->mOffset;
- int64_t aTstampUsecs = aSample->mTime.ToMicroseconds();
+ auto aTstampUsecs = aSample->mTime;
int64_t aTotalFrames = 0;
MOZ_ASSERT(mPacketCount >= 3);
if (!mLastFrameTime ||
mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
// We are starting a new block.
mFrames = 0;
@@ -190,33 +190,33 @@ VorbisDataDecoder::ProcessDecode(MediaRa
}
for (uint32_t j = 0; j < channels; ++j) {
VorbisPCMValue* channel = pcm[j];
for (uint32_t i = 0; i < uint32_t(frames); ++i) {
buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
}
}
- CheckedInt64 duration = FramesToUsecs(frames, rate);
- if (!duration.isValid()) {
+ auto duration = FramesToTimeUnit(frames, rate);
+ if (!duration.IsValid()) {
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow converting audio duration")),
__func__);
}
- CheckedInt64 total_duration = FramesToUsecs(mFrames, rate);
- if (!total_duration.isValid()) {
+ auto total_duration = FramesToTimeUnit(mFrames, rate);
+ if (!total_duration.IsValid()) {
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow converting audio total_duration")),
__func__);
}
- CheckedInt64 time = total_duration + aTstampUsecs;
- if (!time.isValid()) {
+ auto time = total_duration + aTstampUsecs;
+ if (!time.IsValid()) {
return DecodePromise::CreateAndReject(
MediaResult(
NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow adding total_duration and aTstampUsecs")),
__func__);
};
if (!mAudioConverter) {
@@ -232,17 +232,17 @@ VorbisDataDecoder::ProcessDecode(MediaRa
mAudioConverter = MakeUnique<AudioConverter>(in, out);
}
MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
AudioSampleBuffer data(Move(buffer));
data = mAudioConverter->Process(Move(data));
aTotalFrames += frames;
- results.AppendElement(new AudioData(aOffset, time.value(), duration.value(),
+ results.AppendElement(new AudioData(aOffset, time, duration,
frames, data.Forget(), channels, rate));
mFrames += frames;
err = vorbis_synthesis_read(&mVorbisDsp, frames);
if (err) {
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("vorbis_synthesis_read:%d", err)),
__func__);
--- a/dom/media/platforms/agnostic/WAVDecoder.cpp
+++ b/dom/media/platforms/agnostic/WAVDecoder.cpp
@@ -74,17 +74,16 @@ WaveDataDecoder::Decode(MediaRawData* aS
}
RefPtr<MediaDataDecoder::DecodePromise>
WaveDataDecoder::ProcessDecode(MediaRawData* aSample)
{
size_t aLength = aSample->Size();
ByteReader aReader(aSample->Data(), aLength);
int64_t aOffset = aSample->mOffset;
- uint64_t aTstampUsecs = aSample->mTime.ToMicroseconds();
int32_t frames = aLength * 8 / mInfo.mBitDepth / mInfo.mChannels;
AlignedAudioBuffer buffer(frames * mInfo.mChannels);
if (!buffer) {
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
}
@@ -113,20 +112,20 @@ WaveDataDecoder::ProcessDecode(MediaRawD
int32_t v = aReader.ReadLE24();
buffer[i * mInfo.mChannels + j] =
Int24bitToAudioSample<AudioDataValue>(v);
}
}
}
}
- int64_t duration = frames / mInfo.mRate;
+ auto duration = FramesToTimeUnit(frames, mInfo.mRate);
return DecodePromise::CreateAndResolve(
- DecodedData{ new AudioData(aOffset, aTstampUsecs, duration, frames,
+ DecodedData{ new AudioData(aOffset, aSample->mTime, duration, frames,
Move(buffer), mInfo.mChannels, mInfo.mRate) },
__func__);
}
RefPtr<MediaDataDecoder::DecodePromise>
WaveDataDecoder::Drain()
{
return InvokeAsync(mTaskQueue, __func__, [] {
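A note on the duration line above: the original integer expression frames / mInfo.mRate yields whole seconds (almost always 0 after truncation) yet was treated as microseconds, so wrapping it unchanged in FromMicroseconds would carry the unit bug forward. FramesToTimeUnit, as used throughout the rest of this patch, performs the conversion the surrounding code expects, which is what the hunk now does. For example:

    // 4096 frames at 44.1 kHz:
    //   frames / rate         -> 4096 / 44100 == 0   (truncated seconds)
    //   FramesToTimeUnit(...) -> ~92880 us           (4096 * 1e6 / 44100)
    auto duration = FramesToTimeUnit(4096, 44100);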
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
@@ -61,21 +61,21 @@ GMPVideoDecoder::Decoded(GMPVideoi420Fra
}
gfx::IntRect pictureRegion(
0, 0, decodedFrame->Width(), decodedFrame->Height());
RefPtr<VideoData> v = VideoData::CreateAndCopyData(
mConfig,
mImageContainer,
mLastStreamOffset,
- decodedFrame->Timestamp(),
+ media::TimeUnit::FromMicroseconds(decodedFrame->Timestamp()),
media::TimeUnit::FromMicroseconds(decodedFrame->Duration()),
b,
false,
- -1,
+ media::TimeUnit::FromMicroseconds(-1),
pictureRegion);
RefPtr<GMPVideoDecoder> self = this;
if (v) {
mDecodedData.AppendElement(Move(v));
} else {
mDecodedData.Clear();
mDecodePromise.RejectIfExists(
MediaResult(NS_ERROR_OUT_OF_MEMORY,
--- a/dom/media/platforms/android/RemoteDataDecoder.cpp
+++ b/dom/media/platforms/android/RemoteDataDecoder.cpp
@@ -133,20 +133,21 @@ public:
}
if (size > 0) {
RefPtr<layers::Image> img = new SurfaceTextureImage(
mDecoder->mSurfaceTexture.get(), inputInfo.mImageSize,
gl::OriginPos::BottomLeft);
RefPtr<VideoData> v = VideoData::CreateFromImage(
- inputInfo.mDisplaySize, offset, presentationTimeUs,
+ inputInfo.mDisplaySize, offset,
+ TimeUnit::FromMicroseconds(presentationTimeUs),
TimeUnit::FromMicroseconds(inputInfo.mDurationUs),
img, !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME),
- presentationTimeUs);
+ TimeUnit::FromMicroseconds(presentationTimeUs));
v->SetListener(Move(releaseSample));
mDecoder->UpdateOutputStatus(v);
}
if (isEOS) {
mDecoder->DrainComplete();
}
@@ -339,18 +340,18 @@ private:
return;
}
jni::ByteBuffer::LocalRef dest =
jni::ByteBuffer::New(audio.get(), size);
aSample->WriteToByteBuffer(dest);
RefPtr<AudioData> data = new AudioData(
- 0, presentationTimeUs,
- FramesToUsecs(numFrames, mOutputSampleRate).value(), numFrames,
+ 0, TimeUnit::FromMicroseconds(presentationTimeUs),
+ FramesToTimeUnit(numFrames, mOutputSampleRate), numFrames,
Move(audio), mOutputChannels, mOutputSampleRate);
mDecoder->UpdateOutputStatus(data);
}
if ((flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) != 0) {
mDecoder->DrainComplete();
}
--- a/dom/media/platforms/apple/AppleATDecoder.cpp
+++ b/dom/media/platforms/apple/AppleATDecoder.cpp
@@ -318,18 +318,18 @@ AppleATDecoder::DecodeSample(MediaRawDat
mAudioConverter = MakeUnique<AudioConverter>(in, out);
}
if (mAudioConverter) {
MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
data = mAudioConverter->Process(Move(data));
}
RefPtr<AudioData> audio = new AudioData(aSample->mOffset,
- aSample->mTime.ToMicroseconds(),
- duration.ToMicroseconds(),
+ aSample->mTime,
+ duration,
numFrames,
data.Forget(),
channels,
rate);
mDecodedSamples.AppendElement(Move(audio));
return NS_OK;
}
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -351,18 +351,18 @@ AppleVTDecoder::OutputFrame(CVPixelBuffe
// Where our resulting image will end up.
RefPtr<MediaData> data;
// Bounds.
VideoInfo info;
info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
if (useNullSample) {
data = new NullData(aFrameRef.byte_offset,
- aFrameRef.composition_timestamp.ToMicroseconds(),
- aFrameRef.duration.ToMicroseconds());
+ aFrameRef.composition_timestamp,
+ aFrameRef.duration);
} else if (mUseSoftwareImages) {
size_t width = CVPixelBufferGetWidth(aImage);
size_t height = CVPixelBufferGetHeight(aImage);
DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
MOZ_ASSERT(planes == 2, "Likely not NV12 format and it must be.");
VideoData::YCbCrBuffer buffer;
@@ -407,41 +407,41 @@ AppleVTDecoder::OutputFrame(CVPixelBuffe
mPictureWidth,
mPictureHeight);
// Copy the image data into our own format.
data =
VideoData::CreateAndCopyData(info,
mImageContainer,
aFrameRef.byte_offset,
- aFrameRef.composition_timestamp.ToMicroseconds(),
+ aFrameRef.composition_timestamp,
aFrameRef.duration,
buffer,
aFrameRef.is_sync_point,
- aFrameRef.decode_timestamp.ToMicroseconds(),
+ aFrameRef.decode_timestamp,
visible);
// Unlock the returned image data.
CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
} else {
#ifndef MOZ_WIDGET_UIKIT
IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");
RefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);
RefPtr<layers::Image> image = new MacIOSurfaceImage(macSurface);
data =
VideoData::CreateFromImage(info.mDisplay,
aFrameRef.byte_offset,
- aFrameRef.composition_timestamp.ToMicroseconds(),
+ aFrameRef.composition_timestamp,
aFrameRef.duration,
image.forget(),
aFrameRef.is_sync_point,
- aFrameRef.decode_timestamp.ToMicroseconds());
+ aFrameRef.decode_timestamp);
#else
MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
}
if (!data) {
NS_ERROR("Couldn't create VideoData for frame");
MonitorAutoLock mon(mMonitor);
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -199,17 +199,17 @@ FFmpegAudioDecoder<LIBAV_VER>::ProcessDe
return DecodePromise::CreateAndReject(
MediaResult(
NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Invalid count of accumulated audio samples")),
__func__);
}
results.AppendElement(new AudioData(
- samplePosition, pts.ToMicroseconds(), duration.ToMicroseconds(),
+ samplePosition, pts, duration,
mFrame->nb_samples, Move(audio), numChannels, samplingRate));
pts = newpts;
}
packet.data += bytesConsumed;
packet.size -= bytesConsumed;
samplePosition += bytesConsumed;
}
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -338,21 +338,21 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
default:
break;
}
}
RefPtr<VideoData> v =
VideoData::CreateAndCopyData(mInfo,
mImageContainer,
aSample->mOffset,
- pts,
+ TimeUnit::FromMicroseconds(pts),
TimeUnit::FromMicroseconds(duration),
b,
!!mFrame->key_frame,
- -1,
+ TimeUnit::FromMicroseconds(-1),
mInfo.ScaledImageRect(mFrame->width,
mFrame->height));
if (!v) {
return MediaResult(NS_ERROR_OUT_OF_MEMORY,
RESULT_DETAIL("image allocation error"));
}
aResults.AppendElement(Move(v));
--- a/dom/media/platforms/omx/OmxDataDecoder.cpp
+++ b/dom/media/platforms/omx/OmxDataDecoder.cpp
@@ -989,21 +989,21 @@ MediaDataHelper::CreateYUV420VideoData(B
b.mPlanes[2].mOffset = 0;
b.mPlanes[2].mSkip = 0;
VideoInfo info(*mTrackInfo->GetAsVideoInfo());
RefPtr<VideoData> data =
VideoData::CreateAndCopyData(info,
mImageContainer,
0, // Filled later by caller.
- 0, // Filled later by caller.
+ media::TimeUnit::Zero(), // Filled later by caller.
media::TimeUnit::FromMicroseconds(1), // We don't know the duration.
b,
0, // Filled later by caller.
- -1,
+ media::TimeUnit::FromMicroseconds(-1),
info.ImageRect());
LOG("YUV420 VideoData: disp width %d, height %d, pic width %d, height %d, time %lld",
info.mDisplay.width, info.mDisplay.height, info.mImage.width,
info.mImage.height, aBufferData->mBuffer->nTimeStamp);
return data.forget();
}
--- a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
@@ -331,18 +331,18 @@ WMFAudioMFTManager::Output(int64_t aStre
NS_ENSURE_TRUE(timestamp.IsValid(), E_FAIL);
mAudioFrameSum += numFrames;
media::TimeUnit duration = FramesToTimeUnit(numFrames, mAudioRate);
NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
aOutData = new AudioData(aStreamOffset,
- timestamp.ToMicroseconds(),
- duration.ToMicroseconds(),
+ timestamp,
+ duration,
numFrames,
Move(audioData),
mAudioChannels,
mAudioRate);
#ifdef LOG_SAMPLE_DECODE
LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
timestamp.ToMicroseconds(), duration.ToMicroseconds(), currentLength);
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -834,21 +834,21 @@ WMFVideoMFTManager::CreateBasicVideoFram
nsIntRect pictureRegion = mVideoInfo.ScaledImageRect(videoWidth, videoHeight);
LayersBackend backend = GetCompositorBackendType(mKnowsCompositor);
if (backend != LayersBackend::LAYERS_D3D11) {
RefPtr<VideoData> v =
VideoData::CreateAndCopyData(mVideoInfo,
mImageContainer,
aStreamOffset,
- pts.ToMicroseconds(),
+ pts,
duration,
b,
false,
- -1,
+ TimeUnit::FromMicroseconds(-1),
pictureRegion);
if (twoDBuffer) {
twoDBuffer->Unlock2D();
} else {
buffer->Unlock();
}
v.forget(aOutVideoData);
return S_OK;
@@ -861,21 +861,21 @@ WMFVideoMFTManager::CreateBasicVideoFram
mVideoInfo,
b,
pictureRegion,
false);
RefPtr<VideoData> v =
VideoData::CreateFromImage(mVideoInfo.mDisplay,
aStreamOffset,
- pts.ToMicroseconds(),
+ pts,
duration,
image.forget(),
false,
- -1);
+ TimeUnit::FromMicroseconds(-1));
v.forget(aOutVideoData);
return S_OK;
}
HRESULT
WMFVideoMFTManager::CreateD3DVideoFrame(IMFSample* aSample,
int64_t aStreamOffset,
@@ -899,21 +899,21 @@ WMFVideoMFTManager::CreateD3DVideoFrame(
NS_ENSURE_TRUE(image, E_FAIL);
TimeUnit pts = GetSampleTime(aSample);
NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
TimeUnit duration = GetSampleDuration(aSample);
NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
RefPtr<VideoData> v = VideoData::CreateFromImage(mVideoInfo.mDisplay,
aStreamOffset,
- pts.ToMicroseconds(),
+ pts,
duration,
image.forget(),
false,
- -1);
+ TimeUnit::FromMicroseconds(-1));
NS_ENSURE_TRUE(v, E_FAIL);
v.forget(aOutVideoData);
return S_OK;
}
// Blocks until a decoded sample is produced by the decoder.