Bug 1356530 - Change the type of MediaData::mTime to TimeUnit since int64_t is ambiguous.
author JW Wang <jwwang@mozilla.com>
Fri, 14 Apr 2017 17:13:36 +0800
changeset 565755 f12c139975172709b31345455a988d142319ce05
parent 565532 c77c9ba75b45c73f0abf2565b445e93b6362aabe
child 625103 acee29eb799c44d84ca3550381cc9f71f7516390
push id 54989
push user jwwang@mozilla.com
push date Thu, 20 Apr 2017 10:01:17 +0000
bugs 1356530
milestone55.0a1
Bug 1356530 - Change the type of MediaData::mTime to TimeUnit since int64_t is ambiguous. MozReview-Commit-ID: 4bVeqIuWO2O
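
For illustration (a minimal sketch, not part of the changeset): the ambiguity a raw int64_t timestamp invites, and how the typed media::TimeUnit used throughout this diff avoids it. Only TimeUnit operations that appear in the diff below are used here.

    // With a bare int64_t, the unit is only a comment away from being wrong:
    int64_t time = 1500;               // microseconds? milliseconds? frames?
    time += 3;                         // compiles even if 3 is in another unit.

    // With media::TimeUnit, the unit lives in the type and conversions are explicit:
    media::TimeUnit t = media::TimeUnit::FromMicroseconds(1500);
    media::TimeUnit end = t + media::TimeUnit::FromMicroseconds(3);
    MOZ_ASSERT(!end.IsNegative());     // unit-safe helpers replace `time >= 0`.
    int64_t us = end.ToMicroseconds(); // raw value only at boundaries (IPC, logging).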
dom/media/ADTSDemuxer.cpp
dom/media/MP3Demuxer.cpp
dom/media/MediaData.cpp
dom/media/MediaData.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaFormatReader.cpp
dom/media/MediaQueue.h
dom/media/android/AndroidMediaReader.cpp
dom/media/flac/FlacDemuxer.cpp
dom/media/fmp4/MP4Demuxer.cpp
dom/media/gmp/ChromiumCDMParent.cpp
dom/media/gtest/TestMP4Demuxer.cpp
dom/media/ipc/VideoDecoderChild.cpp
dom/media/ipc/VideoDecoderParent.cpp
dom/media/mediasink/AudioSink.cpp
dom/media/mediasink/DecodedStream.cpp
dom/media/mediasink/VideoSink.cpp
dom/media/mediasource/MediaSourceDemuxer.cpp
dom/media/mediasource/TrackBuffersManager.cpp
dom/media/ogg/OggCodecState.cpp
dom/media/ogg/OggDemuxer.cpp
dom/media/platforms/agnostic/BlankDecoderModule.cpp
dom/media/platforms/agnostic/NullDecoderModule.cpp
dom/media/platforms/agnostic/OpusDecoder.cpp
dom/media/platforms/agnostic/TheoraDecoder.cpp
dom/media/platforms/agnostic/VPXDecoder.cpp
dom/media/platforms/agnostic/VorbisDecoder.cpp
dom/media/platforms/agnostic/WAVDecoder.cpp
dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
dom/media/platforms/android/RemoteDataDecoder.cpp
dom/media/platforms/apple/AppleATDecoder.cpp
dom/media/platforms/apple/AppleVTDecoder.cpp
dom/media/platforms/apple/AppleVTDecoder.h
dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
dom/media/platforms/omx/OmxDataDecoder.cpp
dom/media/platforms/omx/OmxPromiseLayer.cpp
dom/media/platforms/wmf/WMFAudioMFTManager.cpp
dom/media/platforms/wmf/WMFVideoMFTManager.cpp
dom/media/wave/WaveDemuxer.cpp
dom/media/webm/WebMDemuxer.cpp
media/libstagefright/binding/Index.cpp
--- a/dom/media/ADTSDemuxer.cpp
+++ b/dom/media/ADTSDemuxer.cpp
@@ -747,22 +747,22 @@ ADTSTrackDemuxer::GetNextFrame(const adt
   const uint32_t read = Read(frameWriter->Data(), offset, length);
   if (read != length) {
     ADTSLOG("GetNext() Exit read=%u frame->Size()=%" PRIuSIZE, read, frame->Size());
     return nullptr;
   }
 
   UpdateState(aFrame);
 
-  frame->mTime = Duration(mFrameIndex - 1).ToMicroseconds();
+  frame->mTime = Duration(mFrameIndex - 1);
   frame->mDuration = Duration(1);
-  frame->mTimecode = media::TimeUnit::FromMicroseconds(frame->mTime);
+  frame->mTimecode = frame->mTime;
   frame->mKeyframe = true;
 
-  MOZ_ASSERT(frame->mTime >= 0);
+  MOZ_ASSERT(!frame->mTime.IsNegative());
   MOZ_ASSERT(frame->mDuration.IsPositive());
 
   ADTSLOGV("GetNext() End mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64
            " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64
            " mSamplesPerFrame=%d mSamplesPerSecond=%d mChannels=%d",
            mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen,
            mSamplesPerFrame, mSamplesPerSecond, mChannels);
 
--- a/dom/media/MP3Demuxer.cpp
+++ b/dom/media/MP3Demuxer.cpp
@@ -599,22 +599,22 @@ MP3TrackDemuxer::GetNextFrame(const Medi
 
   if (read != aRange.Length()) {
     MP3LOG("GetNext() Exit read=%u frame->Size()=%" PRIuSIZE, read, frame->Size());
     return nullptr;
   }
 
   UpdateState(aRange);
 
-  frame->mTime = Duration(mFrameIndex - 1).ToMicroseconds();
+  frame->mTime = Duration(mFrameIndex - 1);
   frame->mDuration = Duration(1);
-  frame->mTimecode = media::TimeUnit::FromMicroseconds(frame->mTime);
+  frame->mTimecode = frame->mTime;
   frame->mKeyframe = true;
 
-  MOZ_ASSERT(frame->mTime >= 0);
+  MOZ_ASSERT(!frame->mTime.IsNegative());
   MOZ_ASSERT(frame->mDuration.IsPositive());
 
   if (mNumParsedFrames == 1) {
     // First frame parsed, let's read VBR info if available.
     ByteReader reader(frame->Data(), frame->Size());
     mParser.ParseVBRHeader(&reader);
     mFirstFrameOffset = frame->mOffset;
   }
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -232,17 +232,17 @@ VideoData::UpdateDuration(const TimeUnit
 void
 VideoData::UpdateTimestamp(const TimeUnit& aTimestamp)
 {
   MOZ_ASSERT(!aTimestamp.IsNegative());
 
   auto updatedDuration = GetEndTime() - aTimestamp;
   MOZ_ASSERT(!updatedDuration.IsNegative());
 
-  mTime = aTimestamp.ToMicroseconds();
+  mTime = aTimestamp;
   mDuration = updatedDuration;
 }
 
 /* static */
 bool VideoData::SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
                                     const VideoInfo& aInfo,
                                     const YCbCrBuffer &aBuffer,
                                     const IntRect& aPicture,
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -289,54 +289,54 @@ public:
 
   MediaData(Type aType,
             int64_t aOffset,
             int64_t aTimestamp,
             int64_t aDuration,
             uint32_t aFrames)
     : mType(aType)
     , mOffset(aOffset)
-    , mTime(aTimestamp)
+    , mTime(media::TimeUnit::FromMicroseconds(aTimestamp))
     , mTimecode(media::TimeUnit::FromMicroseconds(aTimestamp))
     , mDuration(media::TimeUnit::FromMicroseconds(aDuration))
     , mFrames(aFrames)
     , mKeyframe(false)
   {
   }
 
   // Type of contained data.
   const Type mType;
 
   // Approximate byte offset where this data was demuxed from its media.
   int64_t mOffset;
 
-  // Start time of sample, in microseconds.
-  int64_t mTime;
+  // Start time of sample.
+  media::TimeUnit mTime;
 
   // Codec specific internal time code. For Ogg based codecs this is the
   // granulepos.
   media::TimeUnit mTimecode;
 
   // Duration of sample, in microseconds.
   media::TimeUnit mDuration;
 
   // Amount of frames for contained data.
   const uint32_t mFrames;
 
   bool mKeyframe;
 
   media::TimeUnit GetEndTime() const
   {
-    return media::TimeUnit::FromMicroseconds(mTime) + mDuration;
+    return mTime + mDuration;
   }
 
   bool AdjustForStartTime(int64_t aStartTime)
   {
-    mTime = mTime - aStartTime;
-    return mTime >= 0;
+    mTime = mTime - media::TimeUnit::FromMicroseconds(aStartTime);
+    return !mTime.IsNegative();
   }
 
   template <typename ReturnType>
   const ReturnType* As() const
   {
     MOZ_ASSERT(this->mType == ReturnType::sType);
     return static_cast<const ReturnType*>(this);
   }
@@ -347,17 +347,16 @@ public:
     MOZ_ASSERT(this->mType == ReturnType::sType);
     return static_cast<ReturnType*>(this);
   }
 
 protected:
   MediaData(Type aType, uint32_t aFrames)
     : mType(aType)
     , mOffset(0)
-    , mTime(0)
     , mFrames(aFrames)
     , mKeyframe(false)
   {
   }
 
   virtual ~MediaData() { }
 
 };
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -1232,18 +1232,18 @@ private:
       RefPtr<AudioData> audio = AudioQueue().PeekFront();
       RefPtr<VideoData> video = VideoQueue().PeekFront();
 
      // The case where both audio and video have reached the end.
       if (!audio && !video) {
         return seekTime;
       }
 
-      const int64_t audioStart = audio ? audio->mTime : INT64_MAX;
-      const int64_t videoStart = video ? video->mTime : INT64_MAX;
+      const int64_t audioStart = audio ? audio->mTime.ToMicroseconds() : INT64_MAX;
+      const int64_t videoStart = video ? video->mTime.ToMicroseconds() : INT64_MAX;
       const int64_t audioGap = std::abs(audioStart - seekTime.ToMicroseconds());
       const int64_t videoGap = std::abs(videoStart - seekTime.ToMicroseconds());
       return TimeUnit::FromMicroseconds(
         audioGap <= videoGap ? audioStart : videoStart);
     }
 
     MOZ_ASSERT(false, "AccurateSeekTask doesn't handle other seek types.");
     return TimeUnit::Zero();
@@ -1309,17 +1309,17 @@ private:
     MOZ_ASSERT(!mDoneVideoSeeking);
     mMaster->RequestVideoData(false, media::TimeUnit());
   }
 
   void AdjustFastSeekIfNeeded(MediaData* aSample)
   {
     if (mSeekJob.mTarget->IsFast()
         && mSeekJob.mTarget->GetTime() > mCurrentTimeBeforeSeek
-        && aSample->mTime < mCurrentTimeBeforeSeek.ToMicroseconds()) {
+        && aSample->mTime < mCurrentTimeBeforeSeek) {
       // We are doing a fastSeek, but we ended up *before* the previous
       // playback position. This is surprising UX, so switch to an accurate
       // seek and decode to the seek target. This is not conformant to the
       // spec, fastSeek should always be fast, but until we get the time to
       // change all Readers to seek to the keyframe after the currentTime
       // in this case, we'll just decode forward. Bug 1026330.
       mSeekJob.mTarget->SetType(SeekTarget::Accurate);
     }
@@ -1330,17 +1330,17 @@ private:
     MOZ_ASSERT(aAudio && mSeekJob.mTarget->IsAccurate());
 
     auto sampleDuration = FramesToTimeUnit(
       aAudio->mFrames, Info().mAudio.mRate);
     if (!sampleDuration.IsValid()) {
       return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     }
 
-    auto audioTime = TimeUnit::FromMicroseconds(aAudio->mTime);
+    auto audioTime = aAudio->mTime;
     if (audioTime + sampleDuration <= mSeekJob.mTarget->GetTime()) {
       // Our seek target lies after the frames in this AudioData. Don't
       // push it onto the audio queue, and keep decoding forwards.
       return NS_OK;
     }
 
     if (audioTime > mSeekJob.mTarget->GetTime()) {
       // The seek target doesn't lie in the audio block just after the last
@@ -1400,38 +1400,38 @@ private:
 
     return NS_OK;
   }
 
   nsresult DropVideoUpToSeekTarget(VideoData* aVideo)
   {
     MOZ_ASSERT(aVideo);
     SLOG("DropVideoUpToSeekTarget() frame [%" PRId64 ", %" PRId64 "]",
-         aVideo->mTime, aVideo->GetEndTime().ToMicroseconds());
+         aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds());
     const auto target = mSeekJob.mTarget->GetTime();
 
     // If the frame end time is less than the seek target, we won't want
     // to display this frame after the seek, so discard it.
     if (target >= aVideo->GetEndTime()) {
       SLOG("DropVideoUpToSeekTarget() pop video frame [%" PRId64 ", %" PRId64 "] target=%" PRId64,
-           aVideo->mTime, aVideo->GetEndTime().ToMicroseconds(),
+           aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds(),
            target.ToMicroseconds());
       mFirstVideoFrameAfterSeek = aVideo;
     } else {
-      if (target.ToMicroseconds() >= aVideo->mTime &&
+      if (target >= aVideo->mTime &&
           aVideo->GetEndTime() >= target) {
         // The seek target lies inside this frame's time slice. Adjust the
         // frame's start time to match the seek target.
         aVideo->UpdateTimestamp(target);
       }
       mFirstVideoFrameAfterSeek = nullptr;
 
       SLOG("DropVideoUpToSeekTarget() found video frame [%" PRId64 ", %" PRId64 "] "
            "containing target=%" PRId64,
-           aVideo->mTime, aVideo->GetEndTime().ToMicroseconds(),
+           aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds(),
            target.ToMicroseconds());
 
       MOZ_ASSERT(VideoQueue().GetSize() == 0,
                  "Should be the 1st sample after seeking");
       mMaster->PushVideo(aVideo);
       mDoneVideoSeeking = true;
     }
 
@@ -1470,17 +1470,17 @@ private:
  * aCompare A function object with the signature bool(int64_t) which returns
  *          true for samples that should be removed.
  */
 template <typename Type, typename Function>
 static void
 DiscardFrames(MediaQueue<Type>& aQueue, const Function& aCompare)
 {
   while(aQueue.GetSize() > 0) {
-    if (aCompare(aQueue.PeekFront()->mTime)) {
+    if (aCompare(aQueue.PeekFront()->mTime.ToMicroseconds())) {
       RefPtr<Type> releaseMe = aQueue.PopFront();
       continue;
     }
     break;
   }
 }
 
 class MediaDecoderStateMachine::NextFrameSeekingState
@@ -1570,17 +1570,17 @@ private:
   }
 
   void HandleVideoDecoded(VideoData* aVideo, TimeStamp aDecodeStart) override
   {
     MOZ_ASSERT(aVideo);
     MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
     MOZ_ASSERT(NeedMoreVideo());
 
-    if (aVideo->mTime > mCurrentTime.ToMicroseconds()) {
+    if (aVideo->mTime > mCurrentTime) {
       mMaster->PushVideo(aVideo);
       FinishSeek();
     } else {
       RequestVideoData();
     }
   }
 
   void HandleWaitingForAudio() override
@@ -1662,17 +1662,17 @@ private:
 
   // Update the seek target's time before resolving this seek task, the updated
   // time will be used in the MDSM::SeekCompleted() to update the MDSM's
   // position.
   void UpdateSeekTargetTime()
   {
     RefPtr<VideoData> data = VideoQueue().PeekFront();
     if (data) {
-      mSeekJob.mTarget->SetTime(TimeUnit::FromMicroseconds(data->mTime));
+      mSeekJob.mTarget->SetTime(data->mTime);
     } else {
       MOZ_ASSERT(VideoQueue().AtEndOfStream());
       mSeekJob.mTarget->SetTime(mDuration);
     }
   }
 
   void FinishSeek()
   {
@@ -3172,17 +3172,18 @@ MediaDecoderStateMachine::RequestAudioDa
   mReader->RequestAudioData()->Then(
     OwnerThread(), __func__,
     [this, self] (AudioData* aAudio) {
       MOZ_ASSERT(aAudio);
       mAudioDataRequest.Complete();
      // audio->GetEndTime() is not always monotonically increasing in chained ogg.
       mDecodedAudioEndTime = std::max(
         aAudio->GetEndTime(), mDecodedAudioEndTime);
-      LOGV("OnAudioDecoded [%" PRId64 ",%" PRId64 "]", aAudio->mTime,
+      LOGV("OnAudioDecoded [%" PRId64 ",%" PRId64 "]",
+           aAudio->mTime.ToMicroseconds(),
            aAudio->GetEndTime().ToMicroseconds());
       mStateObj->HandleAudioDecoded(aAudio);
     },
     [this, self] (const MediaResult& aError) {
       LOGV("OnAudioNotDecoded aError=%" PRIu32, static_cast<uint32_t>(aError.Code()));
       mAudioDataRequest.Complete();
       switch (aError.Code()) {
         case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
@@ -3218,17 +3219,18 @@ MediaDecoderStateMachine::RequestVideoDa
   mReader->RequestVideoData(aSkipToNextKeyframe, aCurrentTime)->Then(
     OwnerThread(), __func__,
     [this, self, videoDecodeStartTime] (VideoData* aVideo) {
       MOZ_ASSERT(aVideo);
       mVideoDataRequest.Complete();
       // Handle abnormal or negative timestamps.
       mDecodedVideoEndTime = std::max(
         mDecodedVideoEndTime, aVideo->GetEndTime());
-      LOGV("OnVideoDecoded [%" PRId64 ",%" PRId64 "]", aVideo->mTime,
+      LOGV("OnVideoDecoded [%" PRId64 ",%" PRId64 "]",
+           aVideo->mTime.ToMicroseconds(),
            aVideo->GetEndTime().ToMicroseconds());
       mStateObj->HandleVideoDecoded(aVideo, videoDecodeStartTime);
     },
     [this, self] (const MediaResult& aError) {
       LOGV("OnVideoNotDecoded aError=%" PRIu32 , static_cast<uint32_t>(aError.Code()));
       mVideoDataRequest.Complete();
       switch (aError.Code()) {
         case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -1719,17 +1719,17 @@ MediaFormatReader::OnAudioDemuxCompleted
 void
 MediaFormatReader::NotifyNewOutput(
   TrackType aTrack, const MediaDataDecoder::DecodedData& aResults)
 {
   MOZ_ASSERT(OnTaskQueue());
   auto& decoder = GetDecoderData(aTrack);
   for (auto& sample : aResults) {
     LOGV("Received new %s sample time:%" PRId64 " duration:%" PRId64,
-         TrackTypeToStr(aTrack), sample->mTime,
+         TrackTypeToStr(aTrack), sample->mTime.ToMicroseconds(),
          sample->mDuration.ToMicroseconds());
     decoder.mOutput.AppendElement(sample);
     decoder.mNumSamplesOutput++;
     decoder.mNumOfConsecutiveError = 0;
   }
   LOG("Done processing new %s samples", TrackTypeToStr(aTrack));
   ScheduleUpdate(aTrack);
 }
@@ -2006,29 +2006,29 @@ MediaFormatReader::HandleDemuxedSamples(
         }
       }
 
       decoder.mInfo = info;
 
       if (sample->mKeyframe) {
         ScheduleUpdate(aTrack);
       } else {
-        auto time = TimeInterval(
-          TimeUnit::FromMicroseconds(sample->mTime), sample->GetEndTime());
+        auto time = TimeInterval(sample->mTime, sample->GetEndTime());
         InternalSeekTarget seekTarget =
           decoder.mTimeThreshold.refOr(InternalSeekTarget(time, false));
         LOG("Stream change occurred on a non-keyframe. Seeking to:%" PRId64,
-            sample->mTime);
+            sample->mTime.ToMicroseconds());
         InternalSeek(aTrack, seekTarget);
       }
       return;
     }
 
     LOGV("Input:%" PRId64 " (dts:%" PRId64 " kf:%d)",
-         sample->mTime, sample->mTimecode.ToMicroseconds(), sample->mKeyframe);
+         sample->mTime.ToMicroseconds(), sample->mTimecode.ToMicroseconds(),
+         sample->mKeyframe);
     decoder.mNumSamplesInput++;
     decoder.mSizeOfQueue++;
     if (aTrack == TrackInfo::kVideoTrack) {
       aA.mStats.mParsedFrames++;
     }
 
     DecodeDemuxedSamples(aTrack, sample);
 
@@ -2181,70 +2181,70 @@ MediaFormatReader::Update(TrackType aTra
   // Record number of frames decoded and parsed. Automatically update the
   // stats counters using the AutoNotifyDecoded stack-based class.
   AbstractMediaDecoder::AutoNotifyDecoded a(mDecoder);
 
   // Drop any frames found prior our internal seek target.
   while (decoder.mTimeThreshold && decoder.mOutput.Length()) {
     RefPtr<MediaData>& output = decoder.mOutput[0];
     InternalSeekTarget target = decoder.mTimeThreshold.ref();
-    media::TimeUnit time = media::TimeUnit::FromMicroseconds(output->mTime);
+    media::TimeUnit time = output->mTime;
     if (time >= target.Time()) {
       // We have reached our internal seek target.
       decoder.mTimeThreshold.reset();
       // We might have dropped some keyframes.
       mPreviousDecodedKeyframeTime_us = sNoPreviousDecodedKeyframe;
     }
     if (time < target.Time() || (target.mDropTarget && target.Contains(time))) {
       LOGV("Internal Seeking: Dropping %s frame time:%f wanted:%f (kf:%d)",
            TrackTypeToStr(aTrack),
-           media::TimeUnit::FromMicroseconds(output->mTime).ToSeconds(),
+           output->mTime.ToSeconds(),
            target.Time().ToSeconds(),
            output->mKeyframe);
       decoder.mOutput.RemoveElementAt(0);
       decoder.mSizeOfQueue -= 1;
     }
   }
 
   while (decoder.mOutput.Length()
          && decoder.mOutput[0]->mType == MediaData::NULL_DATA) {
-    LOGV("Dropping null data. Time: %" PRId64, decoder.mOutput[0]->mTime);
+    LOGV("Dropping null data. Time: %" PRId64,
+         decoder.mOutput[0]->mTime.ToMicroseconds());
     decoder.mOutput.RemoveElementAt(0);
     decoder.mSizeOfQueue -= 1;
   }
 
   if (decoder.HasPromise()) {
     needOutput = true;
     if (decoder.mOutput.Length()) {
       RefPtr<MediaData> output = decoder.mOutput[0];
       decoder.mOutput.RemoveElementAt(0);
       decoder.mSizeOfQueue -= 1;
       decoder.mLastSampleTime =
-        Some(TimeInterval(TimeUnit::FromMicroseconds(output->mTime),
-                          output->GetEndTime()));
+        Some(TimeInterval(output->mTime, output->GetEndTime()));
       decoder.mNumSamplesOutputTotal++;
       ReturnOutput(output, aTrack);
       // We have a decoded sample ready to be returned.
       if (aTrack == TrackType::kVideoTrack) {
         uint64_t delta =
           decoder.mNumSamplesOutputTotal - mLastReportedNumDecodedFrames;
         a.mStats.mDecodedFrames = static_cast<uint32_t>(delta);
         mLastReportedNumDecodedFrames = decoder.mNumSamplesOutputTotal;
         if (output->mKeyframe) {
-          if (mPreviousDecodedKeyframeTime_us < output->mTime) {
+          if (mPreviousDecodedKeyframeTime_us < output->mTime.ToMicroseconds()) {
             // There is a previous keyframe -> Record inter-keyframe stats.
             uint64_t segment_us =
-              output->mTime - mPreviousDecodedKeyframeTime_us;
+              output->mTime.ToMicroseconds() - mPreviousDecodedKeyframeTime_us;
             a.mStats.mInterKeyframeSum_us += segment_us;
             a.mStats.mInterKeyframeCount += 1;
             if (a.mStats.mInterKeyFrameMax_us < segment_us) {
               a.mStats.mInterKeyFrameMax_us = segment_us;
             }
           }
-          mPreviousDecodedKeyframeTime_us = output->mTime;
+          mPreviousDecodedKeyframeTime_us = output->mTime.ToMicroseconds();
         }
         nsCString error;
         mVideo.mIsHardwareAccelerated =
           mVideo.mDecoder && mVideo.mDecoder->IsHardwareAccelerated(error);
       }
     } else if (decoder.HasFatalError()) {
       LOG("Rejecting %s promise: DECODE_ERROR", TrackTypeToStr(aTrack));
       decoder.RejectPromise(decoder.mError.ref(), __func__);
@@ -2374,17 +2374,17 @@ MediaFormatReader::Update(TrackType aTra
 }
 
 void
 MediaFormatReader::ReturnOutput(MediaData* aData, TrackType aTrack)
 {
   MOZ_ASSERT(GetDecoderData(aTrack).HasPromise());
   MOZ_DIAGNOSTIC_ASSERT(aData->mType != MediaData::NULL_DATA);
   LOG("Resolved data promise for %s [%" PRId64 ", %" PRId64 "]", TrackTypeToStr(aTrack),
-      aData->mTime, aData->GetEndTime().ToMicroseconds());
+      aData->mTime.ToMicroseconds(), aData->GetEndTime().ToMicroseconds());
 
   if (aTrack == TrackInfo::kAudioTrack) {
     AudioData* audioData = static_cast<AudioData*>(aData);
 
     if (audioData->mChannels != mInfo.mAudio.mChannels
         || audioData->mRate != mInfo.mAudio.mRate) {
       LOG("change of audio format (rate:%d->%d). "
           "This is an unsupported configuration",
@@ -2501,18 +2501,17 @@ MediaFormatReader::Reset(TrackType aTrac
 
 void
 MediaFormatReader::DropDecodedSamples(TrackType aTrack)
 {
   MOZ_ASSERT(OnTaskQueue());
   auto& decoder = GetDecoderData(aTrack);
   size_t lengthDecodedQueue = decoder.mOutput.Length();
   if (lengthDecodedQueue && decoder.mTimeThreshold.isSome()) {
-    TimeUnit time =
-      TimeUnit::FromMicroseconds(decoder.mOutput.LastElement()->mTime);
+    TimeUnit time = decoder.mOutput.LastElement()->mTime;
     if (time >= decoder.mTimeThreshold.ref().Time()) {
       // We would have reached our internal seek target.
       decoder.mTimeThreshold.reset();
     }
   }
   decoder.mOutput.Clear();
   decoder.mSizeOfQueue -= lengthDecodedQueue;
   if (aTrack == TrackInfo::kVideoTrack && mDecoder) {
@@ -3097,18 +3096,17 @@ MediaFormatReader::OnFirstDemuxCompleted
   MOZ_ASSERT(OnTaskQueue());
 
   if (mShutdown) {
     return;
   }
 
   auto& decoder = GetDecoderData(aType);
   MOZ_ASSERT(decoder.mFirstDemuxedSampleTime.isNothing());
-  decoder.mFirstDemuxedSampleTime.emplace(
-    TimeUnit::FromMicroseconds(aSamples->mSamples[0]->mTime));
+  decoder.mFirstDemuxedSampleTime.emplace(aSamples->mSamples[0]->mTime);
   MaybeResolveMetadataPromise();
 }
 
 void
 MediaFormatReader::OnFirstDemuxFailed(TrackInfo::TrackType aType,
                                       const MediaResult& aError)
 {
   MOZ_ASSERT(OnTaskQueue());
--- a/dom/media/MediaQueue.h
+++ b/dom/media/MediaQueue.h
@@ -42,17 +42,17 @@ public:
     return nsDeque::GetSize();
   }
 
   inline void Push(T* aItem) {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     MOZ_ASSERT(!mEndOfStream);
     MOZ_ASSERT(aItem);
     NS_ADDREF(aItem);
-    MOZ_ASSERT(aItem->GetEndTime().ToMicroseconds() >= aItem->mTime);
+    MOZ_ASSERT(aItem->GetEndTime() >= aItem->mTime);
     nsDeque::Push(aItem);
     mPushEvent.Notify(RefPtr<T>(aItem));
   }
 
   inline already_AddRefed<T> PopFront() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     RefPtr<T> rv = dont_AddRef(static_cast<T*>(nsDeque::PopFront()));
     if (rv) {
@@ -99,17 +99,17 @@ public:
   // Returns the approximate number of microseconds of items in the queue.
   int64_t Duration() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     if (GetSize() == 0) {
       return 0;
     }
     T* last = static_cast<T*>(nsDeque::Peek());
     T* first = static_cast<T*>(nsDeque::PeekFront());
-    return last->GetEndTime().ToMicroseconds() - first->mTime;
+    return (last->GetEndTime() - first->mTime).ToMicroseconds();
   }
 
   void LockedForEach(nsDequeFunctor& aFunctor) const {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     ForEach(aFunctor);
   }
 
   // Extracts elements from the queue into aResult, in order.
--- a/dom/media/android/AndroidMediaReader.cpp
+++ b/dom/media/android/AndroidMediaReader.cpp
@@ -136,17 +136,18 @@ bool AndroidMediaReader::DecodeVideoFram
     MPAPI::VideoFrame frame;
     if (!mPlugin->ReadVideo(mPlugin, &frame, mVideoSeekTimeUs, &bufferCallback)) {
       // We reached the end of the video stream. If we have a buffered
      // video frame, push it onto the video queue using the total duration
       // of the video as the end time.
       if (mLastVideoFrame) {
         int64_t durationUs;
         mPlugin->GetDuration(mPlugin, &durationUs);
-        durationUs = std::max<int64_t>(durationUs - mLastVideoFrame->mTime, 0);
+        durationUs = std::max<int64_t>(
+          durationUs - mLastVideoFrame->mTime.ToMicroseconds(), 0);
         mLastVideoFrame->UpdateDuration(TimeUnit::FromMicroseconds(durationUs));
         mVideoQueue.Push(mLastVideoFrame);
         mLastVideoFrame = nullptr;
       }
       return false;
     }
     mVideoSeekTimeUs = -1;
 
@@ -242,18 +243,18 @@ bool AndroidMediaReader::DecodeVideoFram
     if (!mLastVideoFrame) {
       mLastVideoFrame = v;
       continue;
     }
 
     // Calculate the duration as the timestamp of the current frame minus the
     // timestamp of the previous frame. We can then return the previously
     // decoded frame, and it will have a valid timestamp.
-    int64_t duration = v->mTime - mLastVideoFrame->mTime;
-    mLastVideoFrame->UpdateDuration(TimeUnit::FromMicroseconds(duration));
+    auto duration = v->mTime - mLastVideoFrame->mTime;
+    mLastVideoFrame->UpdateDuration(duration);
 
     // We have the start time of the next frame, so we can push the previous
     // frame into the queue, except if the end time is below the threshold,
     // in which case it wouldn't be displayed anyway.
     if (mLastVideoFrame->GetEndTime() < aTimeThreshold) {
       mLastVideoFrame = nullptr;
       continue;
     }
@@ -315,17 +316,17 @@ AndroidMediaReader::Seek(const SeekTarge
    // stream to the preceding keyframe first, get the stream time, and then
     // seek the audio stream to match the video stream's time. Otherwise, the
     // audio and video streams won't be in sync after the seek.
     mVideoSeekTimeUs = aTarget.GetTime().ToMicroseconds();
 
     RefPtr<AndroidMediaReader> self = this;
     DecodeToFirstVideoData()->Then(OwnerThread(), __func__, [self] (MediaData* v) {
       self->mSeekRequest.Complete();
-      self->mAudioSeekTimeUs = v->mTime;
+      self->mAudioSeekTimeUs = v->mTime.ToMicroseconds();
       self->mSeekPromise.Resolve(media::TimeUnit::FromMicroseconds(self->mAudioSeekTimeUs), __func__);
     }, [self, aTarget] () {
       self->mSeekRequest.Complete();
       self->mAudioSeekTimeUs = aTarget.GetTime().ToMicroseconds();
       self->mSeekPromise.Resolve(aTarget.GetTime(), __func__);
     })->Track(mSeekRequest);
   } else {
     mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget.GetTime().ToMicroseconds();
--- a/dom/media/flac/FlacDemuxer.cpp
+++ b/dom/media/flac/FlacDemuxer.cpp
@@ -975,23 +975,23 @@ FlacTrackDemuxer::GetNextFrame(const fla
   }
 
   const uint32_t read = Read(frameWriter->Data(), offset, size);
   if (read != size) {
     LOG("GetNextFrame() Exit read=%u frame->Size=%" PRIuSIZE, read, frame->Size());
     return nullptr;
   }
 
-  frame->mTime = aFrame.Time().ToMicroseconds();
+  frame->mTime = aFrame.Time();
   frame->mDuration = aFrame.Duration();
-  frame->mTimecode = TimeUnit::FromMicroseconds(frame->mTime);
+  frame->mTimecode = frame->mTime;
   frame->mOffset = aFrame.Offset();
   frame->mKeyframe = true;
 
-  MOZ_ASSERT(frame->mTime >= 0);
+  MOZ_ASSERT(!frame->mTime.IsNegative());
   MOZ_ASSERT(!frame->mDuration.IsNegative());
 
   return frame.forget();
 }
 
 int32_t
 FlacTrackDemuxer::Read(uint8_t* aBuffer, int64_t aOffset, int32_t aSize)
 {
--- a/dom/media/fmp4/MP4Demuxer.cpp
+++ b/dom/media/fmp4/MP4Demuxer.cpp
@@ -406,20 +406,20 @@ MP4TrackDemuxer::EnsureUpToDateIndex()
   }
   mIndex->UpdateMoofIndex(byteRanges);
   mNeedReIndex = false;
 }
 
 RefPtr<MP4TrackDemuxer::SeekPromise>
 MP4TrackDemuxer::Seek(const media::TimeUnit& aTime)
 {
-  int64_t seekTime = aTime.ToMicroseconds();
+  auto seekTime = aTime;
   mQueuedSample = nullptr;
 
-  mIterator->Seek(seekTime);
+  mIterator->Seek(seekTime.ToMicroseconds());
 
   // Check what time we actually seeked to.
   RefPtr<MediaRawData> sample;
   do {
     sample = GetNextSample();
     if (!sample) {
       return SeekPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
                                           __func__);
@@ -431,18 +431,17 @@ MP4TrackDemuxer::Seek(const media::TimeU
     if (sample->mKeyframe) {
       mQueuedSample = sample;
       seekTime = mQueuedSample->mTime;
     }
   } while (!mQueuedSample);
 
   SetNextKeyFrameTime();
 
-  return SeekPromise::CreateAndResolve(
-    media::TimeUnit::FromMicroseconds(seekTime), __func__);
+  return SeekPromise::CreateAndResolve(seekTime, __func__);
 }
 
 already_AddRefed<MediaRawData>
 MP4TrackDemuxer::GetNextSample()
 {
   RefPtr<MediaRawData> sample = mIterator->GetNext();
   if (!sample) {
     return nullptr;
@@ -456,29 +455,31 @@ MP4TrackDemuxer::GetNextSample()
         case mp4_demuxer::H264::FrameType::I_FRAME: MOZ_FALLTHROUGH;
         case mp4_demuxer::H264::FrameType::OTHER:
         {
           bool keyframe = type == mp4_demuxer::H264::FrameType::I_FRAME;
           if (sample->mKeyframe != keyframe) {
             NS_WARNING(nsPrintfCString("Frame incorrectly marked as %skeyframe "
                                        "@ pts:%" PRId64 " dur:%" PRId64
                                        " dts:%" PRId64,
-                                       keyframe ? "" : "non-", sample->mTime,
+                                       keyframe ? "" : "non-",
+                                       sample->mTime.ToMicroseconds(),
                                        sample->mDuration.ToMicroseconds(),
                                        sample->mTimecode.ToMicroseconds())
                          .get());
             sample->mKeyframe = keyframe;
           }
           break;
         }
         case mp4_demuxer::H264::FrameType::INVALID:
           NS_WARNING(
             nsPrintfCString("Invalid H264 frame @ pts:%" PRId64 " dur:%" PRId64
                             " dts:%" PRId64,
-                            sample->mTime, sample->mDuration.ToMicroseconds(),
+                            sample->mTime.ToMicroseconds(),
+                            sample->mDuration.ToMicroseconds(),
                             sample->mTimecode.ToMicroseconds())
               .get());
           // We could reject the sample now, however demuxer errors are fatal.
           // So we keep the invalid frame, relying on the H264 decoder to
           // handle the error later.
           // TODO: make demuxer errors non-fatal.
           break;
       }
@@ -535,17 +536,17 @@ MP4TrackDemuxer::GetSamples(int32_t aNum
       RefPtr<MediaByteBuffer> extradata =
         mp4_demuxer::AnnexB::ExtractExtraData(sample);
       mNeedSPSForTelemetry = AccumulateSPSTelemetry(extradata);
     }
   }
 
   if (mNextKeyframeTime.isNothing()
       || samples->mSamples.LastElement()->mTime
-      >= mNextKeyframeTime.value().ToMicroseconds()) {
+      >= mNextKeyframeTime.value()) {
     SetNextKeyFrameTime();
   }
   return SamplesPromise::CreateAndResolve(samples, __func__);
 }
 
 void
 MP4TrackDemuxer::SetNextKeyFrameTime()
 {
@@ -585,17 +586,17 @@ MP4TrackDemuxer::SkipToNextRandomAccessP
 {
   mQueuedSample = nullptr;
   // Loop until we reach the next keyframe after the threshold.
   uint32_t parsed = 0;
   bool found = false;
   RefPtr<MediaRawData> sample;
   while (!found && (sample = GetNextSample())) {
     parsed++;
-    if (sample->mKeyframe && sample->mTime >= aTimeThreshold.ToMicroseconds()) {
+    if (sample->mKeyframe && sample->mTime >= aTimeThreshold) {
       found = true;
       mQueuedSample = sample;
     }
   }
   SetNextKeyFrameTime();
   if (found) {
     return SkipAccessPointPromise::CreateAndResolve(parsed, __func__);
   }
--- a/dom/media/gmp/ChromiumCDMParent.cpp
+++ b/dom/media/gmp/ChromiumCDMParent.cpp
@@ -185,17 +185,17 @@ ChromiumCDMParent::InitCDMInputBuffer(gm
   if (!AllocShmem(aSample->Size(), Shmem::SharedMemory::TYPE_BASIC, &shmem)) {
     return false;
   }
   memcpy(shmem.get<uint8_t>(), aSample->Data(), aSample->Size());
 
   aBuffer = gmp::CDMInputBuffer(shmem,
                                 crypto.mKeyId,
                                 crypto.mIV,
-                                aSample->mTime,
+                                aSample->mTime.ToMicroseconds(),
                                 aSample->mDuration.ToMicroseconds(),
                                 crypto.mPlainSizes,
                                 crypto.mEncryptedSizes,
                                 crypto.mValid);
   return true;
 }
 
 bool
@@ -830,17 +830,17 @@ ChromiumCDMParent::DecryptAndDecodeFrame
   if (mIsShutdown) {
     return MediaDataDecoder::DecodePromise::CreateAndReject(
       MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                   RESULT_DETAIL("ChromiumCDMParent is shutdown")),
       __func__);
   }
 
   GMP_LOG("ChromiumCDMParent::DecryptAndDecodeFrame t=%" PRId64,
-          aSample->mTime);
+          aSample->mTime.ToMicroseconds());
 
   CDMInputBuffer buffer;
 
   if (!InitCDMInputBuffer(buffer, aSample)) {
     return MediaDataDecoder::DecodePromise::CreateAndReject(
       MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, "Failed to init CDM buffer."),
       __func__);
   }
--- a/dom/media/gtest/TestMP4Demuxer.cpp
+++ b/dom/media/gtest/TestMP4Demuxer.cpp
@@ -58,36 +58,36 @@ public:
   RefPtr<GenericPromise>
   CheckTrackKeyFrame(MediaTrackDemuxer* aTrackDemuxer)
   {
     MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
     RefPtr<MediaTrackDemuxer> track = aTrackDemuxer;
     RefPtr<MP4DemuxerBinding> binding = this;
 
-    int64_t time = -1;
+    auto time = media::TimeUnit::Invalid();
     while (mIndex < mSamples.Length()) {
       uint32_t i = mIndex++;
       if (mSamples[i]->mKeyframe) {
         time = mSamples[i]->mTime;
         break;
       }
     }
 
     RefPtr<GenericPromise> p = mCheckTrackKeyFramePromise.Ensure(__func__);
 
-    if (time == -1) {
+    if (!time.IsValid()) {
       mCheckTrackKeyFramePromise.Resolve(true, __func__);
       return p;
     }
 
 
     DispatchTask(
       [track, time, binding] () {
-        track->Seek(media::TimeUnit::FromMicroseconds(time))->Then(binding->mTaskQueue, __func__,
+        track->Seek(time)->Then(binding->mTaskQueue, __func__,
           [track, time, binding] () {
             track->GetSamples()->Then(binding->mTaskQueue, __func__,
               [track, time, binding] (RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) {
                 EXPECT_EQ(time, aSamples->mSamples[0]->mTime);
                 binding->CheckTrackKeyFrame(track);
               },
               DO_FAIL
             );
--- a/dom/media/ipc/VideoDecoderChild.cpp
+++ b/dom/media/ipc/VideoDecoderChild.cpp
@@ -225,17 +225,17 @@ VideoDecoderChild::Decode(MediaRawData* 
   if (!AllocShmem(aSample->Size(), Shmem::SharedMemory::TYPE_BASIC, &buffer)) {
     return MediaDataDecoder::DecodePromise::CreateAndReject(
       NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__);
   }
 
   memcpy(buffer.get<uint8_t>(), aSample->Data(), aSample->Size());
 
   MediaRawDataIPDL sample(MediaDataIPDL(aSample->mOffset,
-                                        aSample->mTime,
+                                        aSample->mTime.ToMicroseconds(),
                                         aSample->mTimecode.ToMicroseconds(),
                                         aSample->mDuration.ToMicroseconds(),
                                         aSample->mFrames,
                                         aSample->mKeyframe),
                           buffer);
   SendInput(sample);
   return mDecodePromise.Ensure(__func__);
 }
--- a/dom/media/ipc/VideoDecoderParent.cpp
+++ b/dom/media/ipc/VideoDecoderParent.cpp
@@ -132,17 +132,17 @@ VideoDecoderParent::RecvInput(const Medi
   RefPtr<MediaRawData> data = new MediaRawData(aData.buffer().get<uint8_t>(),
                                                aData.buffer().Size<uint8_t>());
   if (aData.buffer().Size<uint8_t>() && !data->Data()) {
     // OOM
     Error(NS_ERROR_OUT_OF_MEMORY);
     return IPC_OK();
   }
   data->mOffset = aData.base().offset();
-  data->mTime = aData.base().time();
+  data->mTime = media::TimeUnit::FromMicroseconds(aData.base().time());
   data->mTimecode = media::TimeUnit::FromMicroseconds(aData.base().timecode());
   data->mDuration = media::TimeUnit::FromMicroseconds(aData.base().duration());
   data->mKeyframe = aData.base().keyframe();
 
   DeallocShmem(aData.buffer());
 
   RefPtr<VideoDecoderParent> self = this;
   mDecoder->Decode(data)->Then(
@@ -186,17 +186,18 @@ VideoDecoderParent::ProcessDecodedData(
     }
 
     if (texture && !texture->IsAddedToCompositableClient()) {
       texture->InitIPDLActor(mKnowsCompositor);
       texture->SetAddedToCompositableClient();
     }
 
     VideoDataIPDL output(
-      MediaDataIPDL(data->mOffset, data->mTime, data->mTimecode.ToMicroseconds(),
+      MediaDataIPDL(data->mOffset, data->mTime.ToMicroseconds(),
+                    data->mTimecode.ToMicroseconds(),
                     data->mDuration.ToMicroseconds(),
                     data->mFrames, data->mKeyframe),
       video->mDisplay,
       texture ? texture->GetSize() : IntSize(),
       texture ? mParent->StoreImage(video->mImage, texture)
               : SurfaceDescriptorGPUVideo(0),
       video->mFrameID);
     Unused << SendOutput(output);
--- a/dom/media/mediasink/AudioSink.cpp
+++ b/dom/media/mediasink/AudioSink.cpp
@@ -280,17 +280,18 @@ AudioSink::PopFrames(uint32_t aFrames)
     MOZ_ASSERT(mCurrentData->mFrames > 0);
     mProcessedQueueLength -=
       FramesToUsecs(mCurrentData->mFrames, mOutputRate).value();
   }
 
   auto framesToPop = std::min(aFrames, mCursor->Available());
 
   SINK_LOG_V("playing audio at time=%" PRId64 " offset=%u length=%u",
-             mCurrentData->mTime, mCurrentData->mFrames - mCursor->Available(), framesToPop);
+             mCurrentData->mTime.ToMicroseconds(),
+             mCurrentData->mFrames - mCursor->Available(), framesToPop);
 
   UniquePtr<AudioStream::Chunk> chunk =
     MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());
 
   {
     MonitorAutoLock mon(mMonitor);
     mWritten += framesToPop;
     mCursor->Advance(framesToPop);
@@ -401,18 +402,18 @@ AudioSink::NotifyAudioNeeded()
           AudioConfig(data->mChannels, data->mRate),
           AudioConfig(mOutputChannels, mOutputRate));
     }
 
     // See if there's a gap in the audio. If there is, push silence into the
     // audio hardware, so we can play across the gap.
     // Calculate the timestamp of the next chunk of audio in numbers of
     // samples.
-    CheckedInt64 sampleTime = TimeUnitToFrames(
-      TimeUnit::FromMicroseconds(data->mTime) - mStartTime, data->mRate);
+    CheckedInt64 sampleTime =
+      TimeUnitToFrames(data->mTime - mStartTime, data->mRate);
     // Calculate the number of frames that have been pushed onto the audio hardware.
     CheckedInt64 missingFrames = sampleTime - mFramesParsed;
 
     if (!missingFrames.isValid()) {
       NS_WARNING("Int overflow in AudioSink");
       mErrored = true;
       return;
     }
@@ -496,17 +497,17 @@ AudioSink::CreateAudioFromBuffer(Aligned
   CheckedInt64 duration = FramesToUsecs(frames, mOutputRate);
   if (!duration.isValid()) {
     NS_WARNING("Int overflow in AudioSink");
     mErrored = true;
     return nullptr;
   }
   RefPtr<AudioData> data =
     new AudioData(aReference->mOffset,
-                  aReference->mTime,
+                  aReference->mTime.ToMicroseconds(),
                   duration.value(),
                   frames,
                   Move(aBuffer),
                   mOutputChannels,
                   mOutputRate);
   return data.forget();
 }
 
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -456,17 +456,17 @@ SendStreamAudio(DecodedStreamData* aStre
   static const int64_t AUDIO_FUZZ_FRAMES = 1;
 
   MOZ_ASSERT(aData);
   AudioData* audio = aData;
   // This logic has to mimic AudioSink closely to make sure we write
   // the exact same silences
   CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten
     + TimeUnitToFrames(aStartTime, aRate);
-  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);
+  CheckedInt64 frameOffset = TimeUnitToFrames(audio->mTime, aRate);
 
   if (!audioWrittenOffset.isValid() ||
       !frameOffset.isValid() ||
       // ignore packet that we've already processed
       audio->GetEndTime() <= aStream->mNextAudioTime) {
     return;
   }
 
@@ -590,32 +590,31 @@ DecodedStream::SendVideo(bool aIsSameOri
   TimeStamp tracksStartTimeStamp = sourceStream->GetStreamTracksStrartTimeStamp();
   if (tracksStartTimeStamp.IsNull()) {
     tracksStartTimeStamp = TimeStamp::Now();
   }
 
   for (uint32_t i = 0; i < video.Length(); ++i) {
     VideoData* v = video[i];
 
-    if (mData->mNextVideoTime.ToMicroseconds() < v->mTime) {
+    if (mData->mNextVideoTime < v->mTime) {
       // Write last video frame to catch up. mLastVideoImage can be null here
       // which is fine, it just means there's no video.
 
       // TODO: |mLastVideoImage| should come from the last image rendered
       // by the state machine. This will avoid the black frame when capture
      // happens in the middle of playback (especially in the middle of a
       // video frame). E.g. if we have a video frame that is 30 sec long
       // and capture happens at 15 sec, we'll have to append a black frame
       // that is 15 sec long.
-      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
-        FromMicroseconds(v->mTime),
+      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage, v->mTime,
         mData->mNextVideoTime, mData->mLastVideoImageDisplaySize,
-        tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->mTime),
+        tracksStartTimeStamp + v->mTime.ToTimeDuration(),
         &output, aPrincipalHandle);
-      mData->mNextVideoTime = FromMicroseconds(v->mTime);
+      mData->mNextVideoTime = v->mTime;
     }
 
     if (mData->mNextVideoTime < v->GetEndTime()) {
       WriteVideoToMediaStream(sourceStream, v->mImage, v->GetEndTime(),
         mData->mNextVideoTime, v->mDisplay,
         tracksStartTimeStamp + v->GetEndTime().ToTimeDuration(),
         &output, aPrincipalHandle);
       mData->mNextVideoTime = v->GetEndTime();
@@ -741,17 +740,17 @@ DecodedStream::GetPosition(TimeStamp* aT
   return mStartTime.ref() + mLastOutputTime;
 }
 
 void
 DecodedStream::NotifyOutput(int64_t aTime)
 {
   AssertOwnerThread();
   mLastOutputTime = FromMicroseconds(aTime);
-  int64_t currentTime = GetPosition().ToMicroseconds();
+  auto currentTime = GetPosition();
 
   // Remove audio samples that have been played by MSG from the queue.
   RefPtr<AudioData> a = mAudioQueue.PeekFront();
   for (; a && a->mTime < currentTime;) {
     RefPtr<AudioData> releaseMe = mAudioQueue.PopFront();
     a = mAudioQueue.PeekFront();
   }
 }
--- a/dom/media/mediasink/VideoSink.cpp
+++ b/dom/media/mediasink/VideoSink.cpp
@@ -360,26 +360,25 @@ VideoSink::RenderVideoFrames(int32_t aMa
 
     frame->MarkSentToCompositor();
 
     if (!frame->mImage || !frame->mImage->IsValid() ||
         !frame->mImage->GetSize().width || !frame->mImage->GetSize().height) {
       continue;
     }
 
-    int64_t frameTime = frame->mTime;
-    if (frameTime < 0) {
+    if (frame->mTime.IsNegative()) {
       // Frame times before the start time are invalid; drop such frames
       continue;
     }
 
     TimeStamp t;
     if (aMaxFrames > 1) {
       MOZ_ASSERT(!aClockTimeStamp.IsNull());
-      int64_t delta = frame->mTime - aClockTime;
+      int64_t delta = frame->mTime.ToMicroseconds() - aClockTime;
       t = aClockTimeStamp +
           TimeDuration::FromMicroseconds(delta / params.mPlaybackRate);
       if (!lastFrameTime.IsNull() && t <= lastFrameTime) {
         // Timestamps out of order; drop the new frame. In theory we should
         // probably replace the previous frame with the new frame if the
         // timestamps are equal, but this is a corrupt video file already so
         // never mind.
         continue;
@@ -389,17 +388,18 @@ VideoSink::RenderVideoFrames(int32_t aMa
 
     ImageContainer::NonOwningImage* img = images.AppendElement();
     img->mTimeStamp = t;
     img->mImage = frame->mImage;
     img->mFrameID = frame->mFrameID;
     img->mProducerID = mProducerID;
 
     VSINK_LOG_V("playing video frame %" PRId64 " (id=%x) (vq-queued=%" PRIuSIZE ")",
-                frame->mTime, frame->mFrameID, VideoQueue().GetSize());
+                frame->mTime.ToMicroseconds(), frame->mFrameID,
+                VideoQueue().GetSize());
   }
 
   if (images.Length() > 0) {
     mContainer->SetCurrentFrames(frames[0]->mDisplay, images);
   }
 }
 
 void
@@ -419,17 +419,17 @@ VideoSink::UpdateRenderedVideoFrames()
          clockTime >= VideoQueue().PeekFront()->GetEndTime()) {
     RefPtr<VideoData> frame = VideoQueue().PopFront();
     lastFrameEndTime = frame->GetEndTime();
     if (frame->IsSentToCompositor()) {
       mFrameStats.NotifyPresentedFrame();
     } else {
       mFrameStats.NotifyDecodedFrames({ 0, 0, 1 });
       VSINK_LOG_V("discarding video frame mTime=%" PRId64 " clock_time=%" PRId64,
-                  frame->mTime, clockTime.ToMicroseconds());
+                  frame->mTime.ToMicroseconds(), clockTime.ToMicroseconds());
     }
   }
 
   // The presentation end time of the last video frame displayed is either
   // the end time of the current frame, or if we dropped all frames in the
   // queue, the end time of the last frame we removed from the queue.
   RefPtr<VideoData> currentFrame = VideoQueue().PeekFront();
   mVideoFrameEndTime = std::max(mVideoFrameEndTime,
@@ -445,17 +445,17 @@ VideoSink::UpdateRenderedVideoFrames()
   // the start time of the next frame. If we don't have a next frame,
   // we will run render loops again upon incoming frames.
   nsTArray<RefPtr<VideoData>> frames;
   VideoQueue().GetFirstElements(2, &frames);
   if (frames.Length() < 2) {
     return;
   }
 
-  int64_t nextFrameTime = frames[1]->mTime;
+  int64_t nextFrameTime = frames[1]->mTime.ToMicroseconds();
   int64_t delta = std::max(
     nextFrameTime - clockTime.ToMicroseconds(), MIN_UPDATE_INTERVAL_US);
   TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
      delta / mAudioSink->GetPlaybackParams().mPlaybackRate);
 
   RefPtr<VideoSink> self = this;
   mUpdateScheduler.Ensure(target, [self] () {
     self->UpdateRenderedVideoFramesByTimer();
--- a/dom/media/mediasource/MediaSourceDemuxer.cpp
+++ b/dom/media/mediasource/MediaSourceDemuxer.cpp
@@ -474,17 +474,17 @@ MediaSourceTrackDemuxer::DoGetSamples(in
           ? NS_ERROR_DOM_MEDIA_END_OF_STREAM
           : NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, __func__);
       }
       return SamplesPromise::CreateAndReject(result, __func__);
     }
   }
   RefPtr<SamplesHolder> samples = new SamplesHolder;
   samples->mSamples.AppendElement(sample);
-  if (mNextRandomAccessPoint.ToMicroseconds() <= sample->mTime) {
+  if (mNextRandomAccessPoint <= sample->mTime) {
     MonitorAutoLock mon(mMonitor);
     mNextRandomAccessPoint =
       mManager->GetNextRandomAccessPoint(mType, MediaSourceDemuxer::EOS_FUZZ);
   }
   return SamplesPromise::CreateAndResolve(samples, __func__);
 }
 
 RefPtr<MediaSourceTrackDemuxer::SkipAccessPointPromise>
--- a/dom/media/mediasource/TrackBuffersManager.cpp
+++ b/dom/media/mediasource/TrackBuffersManager.cpp
@@ -454,18 +454,18 @@ TrackBuffersManager::DoEvictData(const T
   }
 
   const int64_t finalSize = mSizeSourceBuffer - aSizeToEvict;
 
   if (lastKeyFrameIndex > 0) {
     MSE_DEBUG("Step1. Evicting %" PRId64 " bytes prior currentTime",
               aSizeToEvict - toEvict);
     CodedFrameRemoval(
-      TimeInterval(TimeUnit::FromMicroseconds(0),
-                   TimeUnit::FromMicroseconds(buffer[lastKeyFrameIndex]->mTime - 1)));
+      TimeInterval(TimeUnit::Zero(),
+                   buffer[lastKeyFrameIndex]->mTime - TimeUnit::FromMicroseconds(1)));
   }
 
   if (mSizeSourceBuffer <= finalSize) {
     return;
   }
 
   toEvict = mSizeSourceBuffer - finalSize;
 
@@ -482,29 +482,29 @@ TrackBuffersManager::DoEvictData(const T
     return;
   }
 
   // Don't evict before the end of the current segment
   TimeUnit upperLimit = futureBuffered[0].mEnd;
   uint32_t evictedFramesStartIndex = buffer.Length();
   for (int32_t i = buffer.Length() - 1; i >= 0; i--) {
     const auto& frame = buffer[i];
-    if (frame->mTime <= upperLimit.ToMicroseconds() || toEvict < 0) {
+    if (frame->mTime <= upperLimit || toEvict < 0) {
       // We've reached a frame that shouldn't be evicted -> Evict after it -> i+1.
       // Or the previous loop reached the eviction threshold -> Evict from it -> i+1.
       evictedFramesStartIndex = i + 1;
       break;
     }
     toEvict -= frame->ComputedSizeOfIncludingThis();
   }
   if (evictedFramesStartIndex < buffer.Length()) {
     MSE_DEBUG("Step2. Evicting %" PRId64 " bytes from trailing data",
               mSizeSourceBuffer - finalSize - toEvict);
     CodedFrameRemoval(
-      TimeInterval(TimeUnit::FromMicroseconds(buffer[evictedFramesStartIndex]->mTime),
+      TimeInterval(buffer[evictedFramesStartIndex]->mTime,
                    TimeUnit::FromInfinity()));
   }
 }
 
 RefPtr<TrackBuffersManager::RangeRemovalPromise>
 TrackBuffersManager::CodedFrameRemovalWithPromise(TimeInterval aInterval)
 {
   MOZ_ASSERT(OnTaskQueue());
@@ -554,18 +554,18 @@ TrackBuffersManager::CodedFrameRemoval(T
       // Nothing to remove.
       continue;
     }
 
     // 2. If this track buffer has a random access point timestamp that is greater than or equal to end,
     // then update remove end timestamp to that random access point timestamp.
     if (end < track->mBufferedRanges.GetEnd()) {
       for (auto& frame : track->GetTrackBuffer()) {
-        if (frame->mKeyframe && frame->mTime >= end.ToMicroseconds()) {
-          removeEndTimestamp = TimeUnit::FromMicroseconds(frame->mTime);
+        if (frame->mKeyframe && frame->mTime >= end) {
+          removeEndTimestamp = frame->mTime;
           break;
         }
       }
     }
 
     // 3. Remove all media data, from this track buffer, that contain starting
     // timestamps greater than or equal to start and less than the remove end timestamp.
     // 4. Remove decoding dependencies of the coded frames removed in the previous step:
@@ -1415,24 +1415,22 @@ TrackBuffersManager::CheckSequenceDiscon
     mSourceBufferAttributes->ResetGroupStartTimestamp();
   }
 }
 
 TimeInterval
 TrackBuffersManager::PresentationInterval(const TrackBuffer& aSamples) const
 {
   TimeInterval presentationInterval =
-    TimeInterval(TimeUnit::FromMicroseconds(aSamples[0]->mTime),
-                 aSamples[0]->GetEndTime());
+    TimeInterval(aSamples[0]->mTime, aSamples[0]->GetEndTime());
 
   for (uint32_t i = 1; i < aSamples.Length(); i++) {
     auto& sample = aSamples[i];
     presentationInterval = presentationInterval.Span(
-      TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
-                   sample->GetEndTime()));
+      TimeInterval(sample->mTime, sample->GetEndTime()));
   }
   return presentationInterval;
 }
 
 void
 TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
 {
   if (!aSamples.Length()) {
@@ -1440,18 +1438,18 @@ TrackBuffersManager::ProcessFrames(Track
   }
 
   // 1. If generate timestamps flag equals true
   // Let presentation timestamp equal 0.
   // Otherwise
   // Let presentation timestamp be a double precision floating point representation of the coded frame's presentation timestamp in seconds.
   TimeUnit presentationTimestamp =
     mSourceBufferAttributes->mGenerateTimestamps
-    ? TimeUnit()
-    : TimeUnit::FromMicroseconds(aSamples[0]->mTime);
+    ? TimeUnit::Zero()
+    : aSamples[0]->mTime;
 
   // 3. If mode equals "sequence" and group start timestamp is set, then run the following steps:
   CheckSequenceDiscontinuity(presentationTimestamp);
 
   // 5. Let track buffer equal the track buffer that the coded frame will be added to.
   auto& trackBuffer = aTrackData;
 
   // Some videos do not exactly start at 0, but instead a small negative value.
@@ -1483,17 +1481,17 @@ TrackBuffersManager::ProcessFrames(Track
   if (aSamples.Length()) {
     aTrackData.mLastParsedEndTime = TimeUnit();
   }
 
   for (auto& sample : aSamples) {
     SAMPLE_DEBUG("Processing %s frame(pts:%" PRId64 " end:%" PRId64 ", dts:%" PRId64 ", duration:%" PRId64 ", "
                "kf:%d)",
                aTrackData.mInfo->mMimeType.get(),
-               sample->mTime,
+               sample->mTime.ToMicroseconds(),
                sample->GetEndTime().ToMicroseconds(),
                sample->mTimecode.ToMicroseconds(),
                sample->mDuration.ToMicroseconds(),
                sample->mKeyframe);
 
     const TimeUnit sampleEndTime = sample->GetEndTime();
     if (sampleEndTime > aTrackData.mLastParsedEndTime) {
       aTrackData.mLastParsedEndTime = sampleEndTime;
@@ -1519,17 +1517,17 @@ TrackBuffersManager::ProcessFrames(Track
     // Otherwise:
     //   Let presentation timestamp be a double precision floating point representation of the coded frame's presentation timestamp in seconds.
     //   Let decode timestamp be a double precision floating point representation of the coded frame's decode timestamp in seconds.
 
     // 2. Let frame duration be a double precision floating point representation of the coded frame's duration in seconds.
     // Step 3 is performed earlier or when a discontinuity has been detected.
     // 4. If timestampOffset is not 0, then run the following steps:
 
-    TimeUnit sampleTime = TimeUnit::FromMicroseconds(sample->mTime);
+    TimeUnit sampleTime = sample->mTime;
     TimeUnit sampleTimecode = sample->mTimecode;
     TimeUnit sampleDuration = sample->mDuration;
     TimeUnit timestampOffset = mSourceBufferAttributes->GetTimestampOffset();
 
     TimeInterval sampleInterval =
       mSourceBufferAttributes->mGenerateTimestamps
       ? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
       : TimeInterval(timestampOffset + sampleTime,
@@ -1613,17 +1611,17 @@ TrackBuffersManager::ProcessFrames(Track
       }
       trackBuffer.mNeedRandomAccessPoint = true;
       needDiscontinuityCheck = true;
       continue;
     }
 
     samplesRange += sampleInterval;
     sizeNewSamples += sample->ComputedSizeOfIncludingThis();
-    sample->mTime = sampleInterval.mStart.ToMicroseconds();
+    sample->mTime = sampleInterval.mStart;
     sample->mTimecode = decodeTimestamp;
     sample->mTrackInfo = trackBuffer.mLastInfo;
     samples.AppendElement(sample);
 
     // Steps 11,12,13,14, 15 and 16 will be done in one block in InsertFrames.
 
     trackBuffer.mLongestFrameDuration =
       trackBuffer.mLastFrameDuration.isSome()
@@ -1689,17 +1687,17 @@ TrackBuffersManager::CheckNextInsertionI
     // No target found, it will be added at the end of the track buffer.
     aTrackData.mNextInsertionIndex = Some(uint32_t(data.Length()));
     return true;
   }
   // We now need to find the first frame of the searched interval.
   // We will insert our new frames right before.
   for (uint32_t i = 0; i < data.Length(); i++) {
     const RefPtr<MediaRawData>& sample = data[i];
-    if (sample->mTime >= target.mStart.ToMicroseconds() ||
+    if (sample->mTime >= target.mStart ||
         sample->GetEndTime() > target.mStart) {
       aTrackData.mNextInsertionIndex = Some(i);
       return true;
     }
   }
   NS_ASSERTION(false, "Insertion Index Not Found");
   return false;
 }
@@ -1759,18 +1757,17 @@ TrackBuffersManager::InsertFrames(TrackB
     uint32_t index =
       RemoveFrames(aIntervals, trackBuffer, trackBuffer.mNextInsertionIndex.refOr(0));
     if (index) {
       trackBuffer.mNextInsertionIndex = Some(index);
     }
   }
 
   // 16. Add the coded frame with the presentation timestamp, decode timestamp, and frame duration to the track buffer.
-  if (!CheckNextInsertionIndex(aTrackData,
-                               TimeUnit::FromMicroseconds(aSamples[0]->mTime))) {
+  if (!CheckNextInsertionIndex(aTrackData, aSamples[0]->mTime)) {
     RejectProcessing(NS_ERROR_FAILURE, __func__);
     return;
   }
 
   // Adjust our demuxing index if necessary.
   if (trackBuffer.mNextGetSampleIndex.isSome()) {
     if (trackBuffer.mNextInsertionIndex.ref() == trackBuffer.mNextGetSampleIndex.ref() &&
         aIntervals.GetEnd() >= trackBuffer.mNextSampleTime) {
@@ -1832,18 +1829,17 @@ TrackBuffersManager::RemoveFrames(const 
   //   Remove all coded frames from track buffer that have a presentation timestamp greater than or equal to presentation timestamp and less than frame end timestamp.
   //  If highest end timestamp for track buffer is set and less than or equal to presentation timestamp:
   //   Remove all coded frames from track buffer that have a presentation timestamp greater than or equal to highest end timestamp and less than frame end timestamp"
   TimeUnit intervalsEnd = aIntervals.GetEnd();
   bool mayBreakLoop = false;
   for (uint32_t i = aStartIndex; i < data.Length(); i++) {
     const RefPtr<MediaRawData> sample = data[i];
     TimeInterval sampleInterval =
-      TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
-                   sample->GetEndTime());
+      TimeInterval(sample->mTime, sample->GetEndTime());
     if (aIntervals.Contains(sampleInterval)) {
       if (firstRemovedIndex.isNothing()) {
         firstRemovedIndex = Some(i);
       }
       lastRemovedIndex = i;
       mayBreakLoop = false;
       continue;
     }
@@ -1870,18 +1866,17 @@ TrackBuffersManager::RemoveFrames(const 
   }
 
   TimeUnit maxSampleDuration;
   uint32_t sizeRemoved = 0;
   TimeIntervals removedIntervals;
   for (uint32_t i = firstRemovedIndex.ref(); i <= lastRemovedIndex; i++) {
     const RefPtr<MediaRawData> sample = data[i];
     TimeInterval sampleInterval =
-      TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
-                   sample->GetEndTime());
+      TimeInterval(sample->mTime, sample->GetEndTime());
     removedIntervals += sampleInterval;
     if (sample->mDuration > maxSampleDuration) {
       maxSampleDuration = sample->mDuration;
     }
     sizeRemoved += sample->ComputedSizeOfIncludingThis();
   }
   aTrackData.mSizeBuffer -= sizeRemoved;
 
@@ -1933,25 +1928,24 @@ TrackBuffersManager::RemoveFrames(const 
   aTrackData.mSanitizedBufferedRanges.SetFuzz(maxSampleDuration/2);
 
   data.RemoveElementsAt(firstRemovedIndex.ref(),
                         lastRemovedIndex - firstRemovedIndex.ref() + 1);
 
   if (aIntervals.GetEnd() >= aTrackData.mHighestStartTimestamp) {
     // The sample with the highest presentation time got removed.
     // Rescan the trackbuffer to determine the new one.
-    int64_t highestStartTime = 0;
+    TimeUnit highestStartTime;
     for (const auto& sample : data) {
       if (sample->mTime > highestStartTime) {
         highestStartTime = sample->mTime;
       }
     }
     MonitorAutoLock mon(mMonitor);
-    aTrackData.mHighestStartTimestamp =
-      TimeUnit::FromMicroseconds(highestStartTime);
+    aTrackData.mHighestStartTimestamp = highestStartTime;
   }
 
   return firstRemovedIndex.ref();
 }
 
 void
 TrackBuffersManager::RecreateParser(bool aReuseInitData)
 {
@@ -2111,17 +2105,17 @@ TrackBuffersManager::GetTrackBuffer(Trac
 
 uint32_t TrackBuffersManager::FindSampleIndex(const TrackBuffer& aTrackBuffer,
                                               const TimeInterval& aInterval)
 {
   TimeUnit target = aInterval.mStart - aInterval.mFuzz;
 
   for (uint32_t i = 0; i < aTrackBuffer.Length(); i++) {
     const RefPtr<MediaRawData>& sample = aTrackBuffer[i];
-    if (sample->mTime >= target.ToMicroseconds() ||
+    if (sample->mTime >= target ||
         sample->GetEndTime() > target) {
       return i;
     }
   }
   NS_ASSERTION(false, "FindSampleIndex called with invalid arguments");
 
   return 0;
 }
@@ -2160,17 +2154,17 @@ TrackBuffersManager::Seek(TrackInfo::Tra
     i = FindSampleIndex(track, target);
   }
 
   Maybe<TimeUnit> lastKeyFrameTime;
   TimeUnit lastKeyFrameTimecode;
   uint32_t lastKeyFrameIndex = 0;
   for (; i < track.Length(); i++) {
     const RefPtr<MediaRawData>& sample = track[i];
-    TimeUnit sampleTime = TimeUnit::FromMicroseconds(sample->mTime);
+    TimeUnit sampleTime = sample->mTime;
     if (sampleTime > aTime && lastKeyFrameTime.isSome()) {
       break;
     }
     if (sample->mKeyframe) {
       lastKeyFrameTimecode = sample->mTimecode;
       lastKeyFrameTime = Some(sampleTime);
       lastKeyFrameIndex = i;
     }
@@ -2233,41 +2227,40 @@ TrackBuffersManager::SkipToNextRandomAcc
                 i,
                 nextSampleTimecode,
                 nextSampleTime,
                 aFuzz);
     if (!sample) {
       break;
     }
     if (sample->mKeyframe &&
-        sample->mTime >= aTimeThreadshold.ToMicroseconds()) {
+        sample->mTime >= aTimeThreadshold) {
       aFound = true;
       break;
     }
     nextSampleTimecode = sample->mTimecode + sample->mDuration;
     nextSampleTime = sample->GetEndTime();
     parsed++;
   }
 
   // Adjust the next demux time and index so that the next call to
   // SkipToNextRandomAccessPoint will not count the parsed sample as
   // skipped again.
   if (aFound) {
     trackData.mNextSampleTimecode = track[i]->mTimecode;
-    trackData.mNextSampleTime =
-       TimeUnit::FromMicroseconds(track[i]->mTime);
+    trackData.mNextSampleTime = track[i]->mTime;
     trackData.mNextGetSampleIndex = Some(i);
   } else if (i > 0) {
     // Go back to the previous keyframe or the original position so the
     // next demuxed sample can be decoded.
     for (int j = i - 1; j >= originalPos; j--) {
       const RefPtr<MediaRawData>& sample = track[j];
       if (sample->mKeyframe) {
         trackData.mNextSampleTimecode = sample->mTimecode;
-        trackData.mNextSampleTime = TimeUnit::FromMicroseconds(sample->mTime);
+        trackData.mNextSampleTime = sample->mTime;
         trackData.mNextGetSampleIndex = Some(uint32_t(j));
         // We are unable to skip to a keyframe past aTimeThreshold; however,
         // we are speeding up decoding by dropping the unplayable frames,
         // so we can mark aFound as true.
         aFound = true;
         break;
       }
       parsed--;
@@ -2293,17 +2286,17 @@ TrackBuffersManager::GetSample(TrackInfo
 
   if (aIndex >= track.Length()) {
     // reached the end.
     return nullptr;
   }
 
   const RefPtr<MediaRawData>& sample = track[aIndex];
   if (!aIndex || sample->mTimecode <= aExpectedDts + aFuzz ||
-      sample->mTime <= (aExpectedPts + aFuzz).ToMicroseconds()) {
+      sample->mTime <= aExpectedPts + aFuzz) {
     return sample;
   }
 
   // Gap is too big. End of Stream or Waiting for Data.
   // TODO: check that we have continuous data based on the sanitized buffered
   // range instead.
   return nullptr;
 }
@@ -2361,18 +2354,17 @@ TrackBuffersManager::GetSample(TrackInfo
       GetSample(aTrack,
                 trackData.mNextGetSampleIndex.ref(),
                 nextSampleTimecode,
                 nextSampleTime,
                 aFuzz);
     if (nextSample) {
       // We have a valid next sample, can use exact values.
       trackData.mNextSampleTimecode = nextSample->mTimecode;
-      trackData.mNextSampleTime =
-        TimeUnit::FromMicroseconds(nextSample->mTime);
+      trackData.mNextSampleTime = nextSample->mTime;
     } else {
       // Next sample isn't available yet. Use estimates.
       trackData.mNextSampleTimecode = nextSampleTimecode;
       trackData.mNextSampleTime = nextSampleTime;
     }
     aResult = NS_OK;
     return p.forget();
   }
@@ -2457,17 +2449,17 @@ TrackBuffersManager::FindCurrentPosition
     }
   }
 
   // We couldn't find our sample by decode timestamp. Attempt to find it using
   // presentation timestamp. There will likely be some jerkiness.
   for (uint32_t i = 0; i < track.Length(); i++) {
     const RefPtr<MediaRawData>& sample = track[i];
     TimeInterval sampleInterval{
-      TimeUnit::FromMicroseconds(sample->mTime),
+      sample->mTime,
       sample->GetEndTime(),
       aFuzz};
 
     if (sampleInterval.ContainsWithStrictEnd(trackData.mNextSampleTimecode)) {
       return i;
     }
   }
 
@@ -2497,17 +2489,17 @@ TrackBuffersManager::GetNextRandomAccess
 
   for (; i < track.Length(); i++) {
     const MediaRawData* sample =
       GetSample(aTrack, i, nextSampleTimecode, nextSampleTime, aFuzz);
     if (!sample) {
       break;
     }
     if (sample->mKeyframe) {
-      return TimeUnit::FromMicroseconds(sample->mTime);
+      return sample->mTime;
     }
     nextSampleTimecode = sample->mTimecode + sample->mDuration;
     nextSampleTime = sample->GetEndTime();
   }
   return TimeUnit::FromInfinity();
 }
 
 void
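
Every TrackBuffersManager hunk above follows the same pattern: comparisons, interval construction and bookkeeping stay in TimeUnit end to end, so the FromMicroseconds()/ToMicroseconds() round-trips can simply be deleted. Below is a minimal sketch of the semantics relied on — an illustrative stand-in, not the real class in dom/media/TimeUnits.h, which is backed by CheckedInt64 and also supports Invalid() and FromInfinity():

    #include <cstdint>

    // Illustrative stand-in for media::TimeUnit: typed microseconds with
    // comparison and arithmetic, so "sample->mTime >= target.mStart" compares
    // time with time by construction. A raw int64_t mTime would compile just
    // as happily against frames, bytes or milliseconds -- the ambiguity this
    // patch removes.
    class TimeUnit {
    public:
      static TimeUnit FromMicroseconds(int64_t aUs) { return TimeUnit(aUs); }
      TimeUnit() = default;  // zero, matching the real default constructor
      int64_t ToMicroseconds() const { return mUs; }
      bool IsNegative() const { return mUs < 0; }
      bool operator>(const TimeUnit& aOther) const { return mUs > aOther.mUs; }
      bool operator>=(const TimeUnit& aOther) const { return mUs >= aOther.mUs; }
      TimeUnit operator+(const TimeUnit& aOther) const
      {
        return TimeUnit(mUs + aOther.mUs);
      }
    private:
      explicit TimeUnit(int64_t aUs) : mUs(aUs) {}
      int64_t mUs = 0;
    };
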
--- a/dom/media/ogg/OggCodecState.cpp
+++ b/dom/media/ogg/OggCodecState.cpp
@@ -255,17 +255,17 @@ OggCodecState::PacketOutAsMediaRawData()
 
   int64_t end_tstamp = Time(packet->granulepos);
   NS_ASSERTION(end_tstamp >= 0, "timestamp invalid");
 
   int64_t duration = PacketDuration(packet.get());
   NS_ASSERTION(duration >= 0, "duration invalid");
 
   sample->mTimecode = media::TimeUnit::FromMicroseconds(packet->granulepos);
-  sample->mTime = end_tstamp - duration;
+  sample->mTime = media::TimeUnit::FromMicroseconds(end_tstamp - duration);
   sample->mDuration = media::TimeUnit::FromMicroseconds(duration);
   sample->mKeyframe = IsKeyframe(packet.get());
   sample->mEOS = packet->e_o_s;
 
   return sample.forget();
 }
 
 nsresult
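
One subtlety in the OggCodecState hunk: a granule position encodes where a packet ends, so the presentation time is derived as end minus duration and only then wrapped in a TimeUnit. A worked example with invented values:

    // Invented values for a single 20 ms packet ending 48 ms into the stream.
    int64_t end_tstamp = 48000;  // Time(packet->granulepos)
    int64_t duration = 20000;    // PacketDuration(packet.get())
    sample->mTime = media::TimeUnit::FromMicroseconds(end_tstamp - duration);
    sample->mDuration = media::TimeUnit::FromMicroseconds(duration);
    // sample->GetEndTime() == mTime + mDuration == 48 ms again.
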
--- a/dom/media/ogg/OggDemuxer.cpp
+++ b/dom/media/ogg/OggDemuxer.cpp
@@ -1312,17 +1312,17 @@ OggTrackDemuxer::Seek(const TimeUnit& aT
   // actual time seeked to. Typically the random access point time.
   mQueuedSample = nullptr;
   TimeUnit seekTime = aTime;
   if (mParent->SeekInternal(mType, aTime) == NS_OK) {
     RefPtr<MediaRawData> sample(NextSample());
 
     // Check what time we actually seeked to.
     if (sample != nullptr) {
-      seekTime = TimeUnit::FromMicroseconds(sample->mTime);
+      seekTime = sample->mTime;
       OGG_DEBUG("%p seeked to time %" PRId64, this, seekTime.ToMicroseconds());
     }
     mQueuedSample = sample;
 
     return SeekPromise::CreateAndResolve(seekTime, __func__);
   } else {
     return SeekPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, __func__);
   }
@@ -1398,25 +1398,24 @@ OggTrackDemuxer::SkipToNextRandomAccessP
 {
   uint32_t parsed = 0;
   bool found = false;
   RefPtr<MediaRawData> sample;
 
   OGG_DEBUG("TimeThreshold: %f", aTimeThreshold.ToSeconds());
   while (!found && (sample = NextSample())) {
     parsed++;
-    if (sample->mKeyframe && sample->mTime >= aTimeThreshold.ToMicroseconds()) {
+    if (sample->mKeyframe && sample->mTime >= aTimeThreshold) {
       found = true;
       mQueuedSample = sample;
     }
   }
   if (found) {
     OGG_DEBUG("next sample: %f (parsed: %d)",
-               TimeUnit::FromMicroseconds(sample->mTime).ToSeconds(),
-               parsed);
+               sample->mTime.ToSeconds(), parsed);
     return SkipAccessPointPromise::CreateAndResolve(parsed, __func__);
   } else {
     SkipFailureHolder failure(NS_ERROR_DOM_MEDIA_END_OF_STREAM, parsed);
     return SkipAccessPointPromise::CreateAndReject(Move(failure), __func__);
   }
 }
 
 TimeIntervals
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -70,21 +70,21 @@ BlankVideoDataCreator::Create(MediaRawDa
   buffer.mPlanes[2].mHeight = (mFrameHeight + 1) / 2;
   buffer.mPlanes[2].mWidth = (mFrameWidth + 1) / 2;
   buffer.mPlanes[2].mOffset = 0;
   buffer.mPlanes[2].mSkip = 0;
 
   return VideoData::CreateAndCopyData(mInfo,
                                       mImageContainer,
                                       aSample->mOffset,
-                                      aSample->mTime,
+                                      aSample->mTime.ToMicroseconds(),
                                       aSample->mDuration,
                                       buffer,
                                       aSample->mKeyframe,
-                                      aSample->mTime,
+                                      aSample->mTime.ToMicroseconds(),
                                       mPicture);
 }
 
 BlankAudioDataCreator::BlankAudioDataCreator(uint32_t aChannelCount, uint32_t aSampleRate)
   : mFrameSum(0), mChannelCount(aChannelCount), mSampleRate(aSampleRate)
 {
 }
 
@@ -111,17 +111,17 @@ BlankAudioDataCreator::Create(MediaRawDa
   for (int i = 0; i < frames.value(); i++) {
     float f = sin(2 * pi * noteHz * mFrameSum / mSampleRate);
     for (unsigned c = 0; c < mChannelCount; c++) {
       samples[i * mChannelCount + c] = AudioDataValue(f);
     }
     mFrameSum++;
   }
   RefPtr<AudioData> data(new AudioData(aSample->mOffset,
-                                       aSample->mTime,
+                                       aSample->mTime.ToMicroseconds(),
                                        aSample->mDuration.ToMicroseconds(),
                                        uint32_t(frames.value()),
                                        Move(samples),
                                        mChannelCount,
                                        mSampleRate));
   return data.forget();
 }
 
--- a/dom/media/platforms/agnostic/NullDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/NullDecoderModule.cpp
@@ -12,17 +12,17 @@ class NullVideoDataCreator : public Dumm
 public:
   NullVideoDataCreator() {}
 
   already_AddRefed<MediaData> Create(MediaRawData* aSample) override
   {
     // Create a dummy VideoData with no image. This gives us something to
     // send to media streams if necessary.
     RefPtr<VideoData> v(new VideoData(aSample->mOffset,
-                                      aSample->mTime,
+                                      aSample->mTime.ToMicroseconds(),
                                       aSample->mDuration.ToMicroseconds(),
                                       aSample->mKeyframe,
                                       aSample->mTimecode.ToMicroseconds(),
                                       gfx::IntSize(),
                                       0));
     return v.forget();
   }
 };
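
The Blank and Null decoders show the conversion rule used throughout this patch: mTime stays a TimeUnit inside MediaRawData, and ToMicroseconds() is called exactly once at each seam where a consumer still takes raw microseconds (the VideoData and AudioData constructors above, and the platform decoder APIs below). In sketch form, with HypotheticalSink standing in for any such consumer:

    // HypotheticalSink is illustrative; its Push() takes raw microseconds,
    // like the VideoData/AudioData constructors still do in this patch.
    void ForwardSample(MediaRawData* aSample, HypotheticalSink* aSink)
    {
      // One explicit, auditable conversion at the boundary; everything
      // upstream of it stays typed.
      aSink->Push(aSample->mTime.ToMicroseconds(),
                  aSample->mDuration.ToMicroseconds());
    }
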
--- a/dom/media/platforms/agnostic/OpusDecoder.cpp
+++ b/dom/media/platforms/agnostic/OpusDecoder.cpp
@@ -162,20 +162,21 @@ OpusDataDecoder::ProcessDecode(MediaRawD
     // decoding after a padding discard is invalid.
     OPUS_DEBUG("Opus error, discard padding on interstitial packet");
     return DecodePromise::CreateAndReject(
       MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                   RESULT_DETAIL("Discard padding on interstitial packet")),
       __func__);
   }
 
-  if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
+  if (!mLastFrameTime ||
+      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
     // We are starting a new block.
     mFrames = 0;
-    mLastFrameTime = Some(aSample->mTime);
+    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
   }
 
   // Maximum value is 63*2880, so there's no chance of overflow.
   int frames_number =
     opus_packet_get_nb_frames(aSample->Data(), aSample->Size());
   if (frames_number <= 0) {
     OPUS_DEBUG("Invalid packet header: r=%d length=%" PRIuSIZE, frames_number,
                aSample->Size());
@@ -226,17 +227,17 @@ OpusDataDecoder::ProcessDecode(MediaRawD
 #endif
   if (ret < 0) {
     return DecodePromise::CreateAndReject(
       MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                   RESULT_DETAIL("Opus decoding error:%d", ret)),
       __func__);
   }
   NS_ASSERTION(ret == frames, "Opus decoded too few audio samples");
-  CheckedInt64 startTime = aSample->mTime;
+  CheckedInt64 startTime = aSample->mTime.ToMicroseconds();
 
   // Trim the initial frames while the decoder is settling.
   if (mSkip > 0) {
     int32_t skipFrames = std::min<int32_t>(mSkip, frames);
     int32_t keepFrames = frames - skipFrames;
     OPUS_DEBUG(
       "Opus decoder skipping %d of %d frames", skipFrames, frames);
     PodMove(buffer.get(),
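
In the Opus hunk above (and the matching Vorbis one below), mLastFrameTime remains a Maybe<int64_t>, so both sides of the comparison convert at the boundary. A hypothetical follow-up — not part of this changeset — would migrate the member and drop both ToMicroseconds() calls:

    // Hypothetical member change, for illustration only.
    Maybe<media::TimeUnit> mLastFrameTime;

    if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
      // We are starting a new block.
      mFrames = 0;
      mLastFrameTime = Some(aSample->mTime);
    }
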
--- a/dom/media/platforms/agnostic/TheoraDecoder.cpp
+++ b/dom/media/platforms/agnostic/TheoraDecoder.cpp
@@ -167,17 +167,17 @@ TheoraDecoder::ProcessDecode(MediaRawDat
                         mTheoraInfo.pic_width, mTheoraInfo.pic_height);
 
     VideoInfo info;
     info.mDisplay = mInfo.mDisplay;
     RefPtr<VideoData> v =
       VideoData::CreateAndCopyData(info,
                                    mImageContainer,
                                    aSample->mOffset,
-                                   aSample->mTime,
+                                   aSample->mTime.ToMicroseconds(),
                                    aSample->mDuration,
                                    b,
                                    aSample->mKeyframe,
                                    aSample->mTimecode.ToMicroseconds(),
                                    mInfo.ScaledImageRect(mTheoraInfo.frame_width,
                                                          mTheoraInfo.frame_height));
     if (!v) {
       LOG(
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -202,34 +202,34 @@ VPXDecoder::ProcessDecode(MediaRawData* 
         __func__);
     }
 
     RefPtr<VideoData> v;
     if (!img_alpha) {
       v = VideoData::CreateAndCopyData(mInfo,
                                        mImageContainer,
                                        aSample->mOffset,
-                                       aSample->mTime,
+                                       aSample->mTime.ToMicroseconds(),
                                        aSample->mDuration,
                                        b,
                                        aSample->mKeyframe,
                                        aSample->mTimecode.ToMicroseconds(),
                                        mInfo.ScaledImageRect(img->d_w,
                                                              img->d_h));
     } else {
       VideoData::YCbCrBuffer::Plane alpha_plane;
       alpha_plane.mData = img_alpha->planes[0];
       alpha_plane.mStride = img_alpha->stride[0];
       alpha_plane.mHeight = img_alpha->d_h;
       alpha_plane.mWidth = img_alpha->d_w;
       alpha_plane.mOffset = alpha_plane.mSkip = 0;
       v = VideoData::CreateAndCopyData(mInfo,
                                        mImageContainer,
                                        aSample->mOffset,
-                                       aSample->mTime,
+                                       aSample->mTime.ToMicroseconds(),
                                        aSample->mDuration,
                                        b,
                                        alpha_plane,
                                        aSample->mKeyframe,
                                        aSample->mTimecode.ToMicroseconds(),
                                        mInfo.ScaledImageRect(img->d_w,
                                                              img->d_h));
 
--- a/dom/media/platforms/agnostic/VorbisDecoder.cpp
+++ b/dom/media/platforms/agnostic/VorbisDecoder.cpp
@@ -136,25 +136,26 @@ VorbisDataDecoder::Decode(MediaRawData* 
 RefPtr<MediaDataDecoder::DecodePromise>
 VorbisDataDecoder::ProcessDecode(MediaRawData* aSample)
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
   const unsigned char* aData = aSample->Data();
   size_t aLength = aSample->Size();
   int64_t aOffset = aSample->mOffset;
-  int64_t aTstampUsecs = aSample->mTime;
+  int64_t aTstampUsecs = aSample->mTime.ToMicroseconds();
   int64_t aTotalFrames = 0;
 
   MOZ_ASSERT(mPacketCount >= 3);
 
-  if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
+  if (!mLastFrameTime ||
+      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
     // We are starting a new block.
     mFrames = 0;
-    mLastFrameTime = Some(aSample->mTime);
+    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
   }
 
   ogg_packet pkt = InitVorbisPacket(
     aData, aLength, false, aSample->mEOS,
     aSample->mTimecode.ToMicroseconds(), mPacketCount++);
 
   int err = vorbis_synthesis(&mVorbisBlock, &pkt);
   if (err) {
--- a/dom/media/platforms/agnostic/WAVDecoder.cpp
+++ b/dom/media/platforms/agnostic/WAVDecoder.cpp
@@ -74,17 +74,17 @@ WaveDataDecoder::Decode(MediaRawData* aS
 }
 
 RefPtr<MediaDataDecoder::DecodePromise>
 WaveDataDecoder::ProcessDecode(MediaRawData* aSample)
 {
   size_t aLength = aSample->Size();
   ByteReader aReader(aSample->Data(), aLength);
   int64_t aOffset = aSample->mOffset;
-  uint64_t aTstampUsecs = aSample->mTime;
+  uint64_t aTstampUsecs = aSample->mTime.ToMicroseconds();
 
   int32_t frames = aLength * 8 / mInfo.mBitDepth / mInfo.mChannels;
 
   AlignedAudioBuffer buffer(frames * mInfo.mChannels);
   if (!buffer) {
     return DecodePromise::CreateAndReject(
       MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
   }
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
@@ -195,17 +195,17 @@ GMPVideoDecoder::CreateFrame(MediaRawDat
       buf += length;
     }
   }
 
   frame->SetBufferType(GMP_BufferLength32);
 
   frame->SetEncodedWidth(mConfig.mDisplay.width);
   frame->SetEncodedHeight(mConfig.mDisplay.height);
-  frame->SetTimeStamp(aSample->mTime);
+  frame->SetTimeStamp(aSample->mTime.ToMicroseconds());
   frame->SetCompleteFrame(true);
   frame->SetDuration(aSample->mDuration.ToMicroseconds());
   frame->SetFrameType(aSample->mKeyframe ? kGMPKeyFrame : kGMPDeltaFrame);
 
   return frame;
 }
 
 const VideoInfo&
--- a/dom/media/platforms/android/RemoteDataDecoder.cpp
+++ b/dom/media/platforms/android/RemoteDataDecoder.cpp
@@ -221,17 +221,17 @@ public:
   {
     const VideoInfo* config = aSample->mTrackInfo
                               ? aSample->mTrackInfo->GetAsVideoInfo()
                               : &mConfig;
     MOZ_ASSERT(config);
 
     InputInfo info(
       aSample->mDuration.ToMicroseconds(), config->mImage, config->mDisplay);
-    mInputInfos.Insert(aSample->mTime, info);
+    mInputInfos.Insert(aSample->mTime.ToMicroseconds(), info);
     return RemoteDataDecoder::Decode(aSample);
   }
 
   bool SupportDecoderRecycling() const override
   {
     return mIsCodecSupportAdaptivePlayback;
   }
 
@@ -532,17 +532,17 @@ RemoteDataDecoder::Decode(MediaRawData* 
       const_cast<uint8_t*>(sample->Data()), sample->Size());
 
     BufferInfo::LocalRef bufferInfo;
     nsresult rv = BufferInfo::New(&bufferInfo);
     if (NS_FAILED(rv)) {
       return DecodePromise::CreateAndReject(
         MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
     }
-    bufferInfo->Set(0, sample->Size(), sample->mTime, 0);
+    bufferInfo->Set(0, sample->Size(), sample->mTime.ToMicroseconds(), 0);
 
     mDrainStatus = DrainStatus::DRAINABLE;
     return mJavaDecoder->Input(bytes, bufferInfo, GetCryptoInfoFromSample(sample))
            ? mDecodePromise.Ensure(__func__)
            : DecodePromise::CreateAndReject(
                MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
 
   });
--- a/dom/media/platforms/apple/AppleATDecoder.cpp
+++ b/dom/media/platforms/apple/AppleATDecoder.cpp
@@ -62,17 +62,17 @@ AppleATDecoder::Init()
 
   return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__);
 }
 
 RefPtr<MediaDataDecoder::DecodePromise>
 AppleATDecoder::Decode(MediaRawData* aSample)
 {
   LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio", aSample,
-      aSample->mDuration.ToMicroseconds(), aSample->mTime,
+      aSample->mDuration.ToMicroseconds(), aSample->mTime.ToMicroseconds(),
       aSample->mKeyframe ? " keyframe" : "",
       (unsigned long long)aSample->Size());
   RefPtr<AppleATDecoder> self = this;
   RefPtr<MediaRawData> sample = aSample;
   return InvokeAsync(mTaskQueue, __func__, [self, this, sample] {
     return ProcessDecode(sample);
   });
 }
@@ -265,17 +265,17 @@ AppleATDecoder::DecodeSample(MediaRawDat
                                                   &numFrames /* in/out */,
                                                   &decBuffer,
                                                   packets.get());
 
     if (rv && rv != kNoMoreDataErr) {
       LOG("Error decoding audio sample: %d\n", static_cast<int>(rv));
       return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                          RESULT_DETAIL("Error decoding audio sample: %d @ %lld",
-                                       static_cast<int>(rv), aSample->mTime));
+                                       static_cast<int>(rv), aSample->mTime.ToMicroseconds()));
     }
 
     if (numFrames) {
       outputData.AppendElements(decoded.get(), numFrames * channels);
     }
 
     if (rv == kNoMoreDataErr) {
       break;
@@ -318,17 +318,17 @@ AppleATDecoder::DecodeSample(MediaRawDat
     mAudioConverter = MakeUnique<AudioConverter>(in, out);
   }
   if (mAudioConverter) {
     MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
     data = mAudioConverter->Process(Move(data));
   }
 
   RefPtr<AudioData> audio = new AudioData(aSample->mOffset,
-                                          aSample->mTime,
+                                          aSample->mTime.ToMicroseconds(),
                                           duration.ToMicroseconds(),
                                           numFrames,
                                           data.Forget(),
                                           channels,
                                           rate);
   mDecodedSamples.AppendElement(Move(audio));
   return NS_OK;
 }
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -73,17 +73,17 @@ AppleVTDecoder::Init()
   return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
 }
 
 RefPtr<MediaDataDecoder::DecodePromise>
 AppleVTDecoder::Decode(MediaRawData* aSample)
 {
   LOG("mp4 input sample %p pts %lld duration %lld us%s %" PRIuSIZE " bytes",
       aSample,
-      aSample->mTime,
+      aSample->mTime.ToMicroseconds(),
       aSample->mDuration.ToMicroseconds(),
       aSample->mKeyframe ? " keyframe" : "",
       aSample->Size());
 
   RefPtr<AppleVTDecoder> self = this;
   RefPtr<MediaRawData> sample = aSample;
   return InvokeAsync(mTaskQueue, __func__, [self, this, sample] {
     RefPtr<DecodePromise> p;
@@ -127,17 +127,17 @@ AppleVTDecoder::Shutdown()
 static CMSampleTimingInfo
 TimingInfoFromSample(MediaRawData* aSample)
 {
   CMSampleTimingInfo timestamp;
 
   timestamp.duration = CMTimeMake(
     aSample->mDuration.ToMicroseconds(), USECS_PER_S);
   timestamp.presentationTimeStamp =
-    CMTimeMake(aSample->mTime, USECS_PER_S);
+    CMTimeMake(aSample->mTime.ToMicroseconds(), USECS_PER_S);
   timestamp.decodeTimeStamp =
     CMTimeMake(aSample->mTimecode.ToMicroseconds(), USECS_PER_S);
 
   return timestamp;
 }
 
 void
 AppleVTDecoder::ProcessDecode(MediaRawData* aSample)
--- a/dom/media/platforms/apple/AppleVTDecoder.h
+++ b/dom/media/platforms/apple/AppleVTDecoder.h
@@ -28,17 +28,17 @@ public:
     media::TimeUnit decode_timestamp;
     media::TimeUnit composition_timestamp;
     media::TimeUnit duration;
     int64_t byte_offset;
     bool is_sync_point;
 
     explicit AppleFrameRef(const MediaRawData& aSample)
       : decode_timestamp(aSample.mTimecode)
-      , composition_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTime))
+      , composition_timestamp(aSample.mTime)
       , duration(aSample.mDuration)
       , byte_offset(aSample.mOffset)
       , is_sync_point(aSample.mKeyframe)
     {
     }
   };
 
   RefPtr<InitPromise> Init() override;
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -132,17 +132,17 @@ FFmpegAudioDecoder<LIBAV_VER>::ProcessDe
     return DecodePromise::CreateAndReject(
       MediaResult(
         NS_ERROR_OUT_OF_MEMORY,
         RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame")),
       __func__);
   }
 
   int64_t samplePosition = aSample->mOffset;
-  media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);
+  media::TimeUnit pts = aSample->mTime;
 
   DecodedData results;
   while (packet.size > 0) {
     int decoded;
     int bytesConsumed =
       mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);
 
     if (bytesConsumed < 0) {
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -192,17 +192,18 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
       || mCodecID == AV_CODEC_ID_VP9
 #endif
       )) {
     while (inputSize) {
       uint8_t* data;
       int size;
       int len = mLib->av_parser_parse2(
         mCodecParser, mCodecContext, &data, &size, inputData, inputSize,
-        aSample->mTime, aSample->mTimecode.ToMicroseconds(), aSample->mOffset);
+        aSample->mTime.ToMicroseconds(), aSample->mTimecode.ToMicroseconds(),
+        aSample->mOffset);
       if (size_t(len) > inputSize) {
         return NS_ERROR_DOM_MEDIA_DECODE_ERR;
       }
       inputData += len;
       inputSize -= len;
       if (size) {
         bool gotFrame = false;
         MediaResult rv = DoDecode(aSample, data, size, &gotFrame, aResults);
@@ -227,17 +228,17 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
                                         MediaDataDecoder::DecodedData& aResults)
 {
   AVPacket packet;
   mLib->av_init_packet(&packet);
 
   packet.data = aData;
   packet.size = aSize;
   packet.dts = mLastInputDts = aSample->mTimecode.ToMicroseconds();
-  packet.pts = aSample->mTime;
+  packet.pts = aSample->mTime.ToMicroseconds();
   packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
   packet.pos = aSample->mOffset;
 
   // LibAV provides no API to retrieve the decoded sample's duration.
   // (FFmpeg >= 1.0 provides av_frame_get_pkt_duration)
   // As such, we instead store the duration in a map keyed on the dts and
   // retrieve it later.
   // The map will have a typical size of 16 entries.
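
The duration map mentioned in that comment works by echoing the dts through libav: remember the duration when the packet goes in, look it up when a frame carrying the same pkt_dts comes out. A simplified sketch (the real decoder wraps this in its own small helper, so the std::map here is illustrative):

    #include <cstdint>
    #include <map>

    // dts (microseconds) -> duration (microseconds)
    std::map<int64_t, int64_t> durationByDts;

    // On input, alongside the packet.dts / packet.pts assignments above:
    durationByDts[aSample->mTimecode.ToMicroseconds()] =
      aSample->mDuration.ToMicroseconds();

    // On output, libav echoes the dts back on the decoded frame:
    int64_t durationUs = durationByDts[mFrame->pkt_dts];
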
--- a/dom/media/platforms/omx/OmxDataDecoder.cpp
+++ b/dom/media/platforms/omx/OmxDataDecoder.cpp
@@ -440,17 +440,17 @@ OmxDataDecoder::FillAndEmptyBuffers()
     // Buffer size should be large enough for raw data.
     MOZ_RELEASE_ASSERT(inbuf->mBuffer->nAllocLen >= data->Size());
 
     memcpy(inbuf->mBuffer->pBuffer, data->Data(), data->Size());
     inbuf->mBuffer->nFilledLen = data->Size();
     inbuf->mBuffer->nOffset = 0;
     inbuf->mBuffer->nFlags = inbuf->mBuffer->nAllocLen > data->Size() ?
                              OMX_BUFFERFLAG_ENDOFFRAME : 0;
-    inbuf->mBuffer->nTimeStamp = data->mTime;
+    inbuf->mBuffer->nTimeStamp = data->mTime.ToMicroseconds();
     if (data->Size()) {
       inbuf->mRawData = mMediaRawDatas[0];
     } else {
        LOG("send EOS buffer");
       inbuf->mBuffer->nFlags |= OMX_BUFFERFLAG_EOS;
     }
 
     LOG("feed sample %p to omx component, len %ld, flag %lX", data.get(),
--- a/dom/media/platforms/omx/OmxPromiseLayer.cpp
+++ b/dom/media/platforms/omx/OmxPromiseLayer.cpp
@@ -118,17 +118,17 @@ OmxPromiseLayer::GetBufferHolders(OMX_DI
 
   return &mOutbufferHolders;
 }
 
 already_AddRefed<MediaRawData>
 OmxPromiseLayer::FindAndRemoveRawData(OMX_TICKS aTimecode)
 {
   for (auto raw : mRawDatas) {
-    if (raw->mTime == aTimecode) {
+    if (raw->mTime.ToMicroseconds() == aTimecode) {
       mRawDatas.RemoveElement(raw);
       return raw.forget();
     }
   }
   return nullptr;
 }
 
 already_AddRefed<BufferData>
--- a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
@@ -188,17 +188,17 @@ WMFAudioMFTManager::Init()
   return true;
 }
 
 HRESULT
 WMFAudioMFTManager::Input(MediaRawData* aSample)
 {
   return mDecoder->Input(aSample->Data(),
                          uint32_t(aSample->Size()),
-                         aSample->mTime);
+                         aSample->mTime.ToMicroseconds());
 }
 
 HRESULT
 WMFAudioMFTManager::UpdateOutputType()
 {
   HRESULT hr;
 
   RefPtr<IMFMediaType> type;
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -663,22 +663,22 @@ WMFVideoMFTManager::Input(MediaRawData* 
   if (!mDecoder) {
     // This can happen during shutdown.
     return E_FAIL;
   }
 
   RefPtr<IMFSample> inputSample;
   HRESULT hr = mDecoder->CreateInputSample(aSample->Data(),
                                            uint32_t(aSample->Size()),
-                                           aSample->mTime,
+                                           aSample->mTime.ToMicroseconds(),
                                            &inputSample);
   NS_ENSURE_TRUE(SUCCEEDED(hr) && inputSample != nullptr, hr);
 
   mLastDuration = aSample->mDuration.ToMicroseconds();
-  mLastTime = aSample->mTime;
+  mLastTime = aSample->mTime.ToMicroseconds();
   mSamplesCount++;
 
   // Forward sample data to the decoder.
   return mDecoder->Input(inputSample);
 }
 
 class SupportsConfigEvent : public Runnable {
 public:
@@ -1027,17 +1027,17 @@ WMFVideoMFTManager::Output(int64_t aStre
   }
   // Frame should be non-null only when we succeeded.
   MOZ_ASSERT((frame != nullptr) == SUCCEEDED(hr));
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
   NS_ENSURE_TRUE(frame, E_FAIL);
 
   aOutData = frame;
   // Set the potentially corrected pts and duration.
-  aOutData->mTime = pts.ToMicroseconds();
+  aOutData->mTime = pts;
   aOutData->mDuration = duration;
 
   if (mNullOutputCount) {
     mGotValidOutputAfterNullOutput = true;
   }
 
   return S_OK;
 }
--- a/dom/media/wave/WaveDemuxer.cpp
+++ b/dom/media/wave/WaveDemuxer.cpp
@@ -526,29 +526,29 @@ WAVTrackDemuxer::GetNextChunk(const Medi
   if (read != aRange.Length()) {
     return nullptr;
   }
 
   UpdateState(aRange);
   ++mNumParsedChunks;
   ++mChunkIndex;
 
-  datachunk->mTime = Duration(mChunkIndex - 1).ToMicroseconds();
+  datachunk->mTime = Duration(mChunkIndex - 1);
 
   if (static_cast<uint32_t>(mChunkIndex) * DATA_CHUNK_SIZE < mDataLength) {
     datachunk->mDuration = Duration(1);
   } else {
     uint32_t mBytesRemaining =
       mDataLength - mChunkIndex * DATA_CHUNK_SIZE;
     datachunk->mDuration = DurationFromBytes(mBytesRemaining);
   }
-  datachunk->mTimecode = media::TimeUnit::FromMicroseconds(datachunk->mTime);
+  datachunk->mTimecode = datachunk->mTime;
   datachunk->mKeyframe = true;
 
-  MOZ_ASSERT(datachunk->mTime >= 0);
+  MOZ_ASSERT(!datachunk->mTime.IsNegative());
   MOZ_ASSERT(!datachunk->mDuration.IsNegative());
 
   return datachunk.forget();
 }
 
 already_AddRefed<MediaRawData>
 WAVTrackDemuxer::GetFileHeader(const MediaByteRange& aRange)
 {
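
For the final, possibly short, data chunk above, DurationFromBytes() converts the remaining byte count into a TimeUnit. A worked example under assumed parameters (16-bit stereo at 44.1 kHz; the real figures come from the parsed fmt chunk):

    // Assumed format: 44100 Hz, 2 channels, 2 bytes per sample.
    const uint32_t bytesPerSecond = 44100 * 2 * 2;  // 176,400 bytes/s
    const uint32_t bytesRemaining = 88200;          // half a second left
    auto duration =
      media::TimeUnit::FromSeconds(double(bytesRemaining) / bytesPerSecond);
    // duration.ToSeconds() == 0.5
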
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -718,17 +718,17 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
     } else {
       sample = new MediaRawData(data, length);
       if (length && !sample->Data()) {
         // OOM.
         return NS_ERROR_OUT_OF_MEMORY;
       }
     }
     sample->mTimecode = media::TimeUnit::FromMicroseconds(tstamp);
-    sample->mTime = tstamp;
+    sample->mTime = media::TimeUnit::FromMicroseconds(tstamp);
     sample->mDuration = media::TimeUnit::FromMicroseconds(next_tstamp - tstamp);
     sample->mOffset = holder->Offset();
     sample->mKeyframe = isKeyframe;
     if (discardPadding && i == count - 1) {
       CheckedInt64 discardFrames;
       if (discardPadding < 0) {
         // This is an invalid value as discard padding should never be negative.
         // Set to maximum value so that the decoder will reject it as it's
@@ -1077,17 +1077,17 @@ WebMTrackDemuxer::Seek(const media::Time
     }
     return SeekPromise::CreateAndReject(rv, __func__);
   }
   mNeedKeyframe = true;
 
   // Check what time we actually seeked to.
   if (mSamples.GetSize() > 0) {
     const RefPtr<MediaRawData>& sample = mSamples.First();
-    seekTime = media::TimeUnit::FromMicroseconds(sample->mTime);
+    seekTime = sample->mTime;
   }
   SetNextKeyFrameTime();
 
   return SeekPromise::CreateAndResolve(seekTime, __func__);
 }
 
 nsresult
 WebMTrackDemuxer::NextSample(RefPtr<MediaRawData>& aData)
@@ -1135,17 +1135,17 @@ WebMTrackDemuxer::GetSamples(int32_t aNu
 
 void
 WebMTrackDemuxer::SetNextKeyFrameTime()
 {
   if (mType != TrackInfo::kVideoTrack || mParent->IsMediaSource()) {
     return;
   }
 
-  int64_t frameTime = -1;
+  auto frameTime = media::TimeUnit::Invalid();
 
   mNextKeyframeTime.reset();
 
   MediaRawDataQueue skipSamplesQueue;
   bool foundKeyframe = false;
   while (!foundKeyframe && mSamples.GetSize()) {
     RefPtr<MediaRawData> sample = mSamples.PopFront();
     if (sample->mKeyframe) {
@@ -1176,18 +1176,18 @@ WebMTrackDemuxer::SetNextKeyFrameTime()
       WEBM_DEBUG("Couldn't find keyframe in a reasonable time, aborting");
       break;
     }
   }
   // We may have demuxed more than intended, so ensure that all frames are kept
   // in the right order.
   mSamples.PushFront(Move(skipSamplesQueue));
 
-  if (frameTime != -1) {
-    mNextKeyframeTime.emplace(media::TimeUnit::FromMicroseconds(frameTime));
+  if (frameTime.IsValid()) {
+    mNextKeyframeTime.emplace(frameTime);
     WEBM_DEBUG("Next Keyframe %f (%u queued %.02fs)",
                mNextKeyframeTime.value().ToSeconds(),
                uint32_t(mSamples.GetSize()),
                (mSamples.Last()->mTimecode - mSamples.First()->mTimecode).ToSeconds());
   } else {
     WEBM_DEBUG("Couldn't determine next keyframe time  (%u queued)",
                uint32_t(mSamples.GetSize()));
   }
@@ -1215,18 +1215,17 @@ WebMTrackDemuxer::UpdateSamples(nsTArray
     if (sample->mCrypto.mValid) {
       nsAutoPtr<MediaRawDataWriter> writer(sample->CreateWriter());
       writer->mCrypto.mMode = mInfo->mCrypto.mMode;
       writer->mCrypto.mIVSize = mInfo->mCrypto.mIVSize;
       writer->mCrypto.mKeyId.AppendElements(mInfo->mCrypto.mKeyId);
     }
   }
   if (mNextKeyframeTime.isNothing()
-      || aSamples.LastElement()->mTime
-         >= mNextKeyframeTime.value().ToMicroseconds()) {
+      || aSamples.LastElement()->mTime >= mNextKeyframeTime.value()) {
     SetNextKeyFrameTime();
   }
 }
 
 nsresult
 WebMTrackDemuxer::GetNextRandomAccessPoint(media::TimeUnit* aTime)
 {
   if (mNextKeyframeTime.isNothing()) {
@@ -1242,35 +1241,32 @@ WebMTrackDemuxer::GetNextRandomAccessPoi
 RefPtr<WebMTrackDemuxer::SkipAccessPointPromise>
 WebMTrackDemuxer::SkipToNextRandomAccessPoint(
   const media::TimeUnit& aTimeThreshold)
 {
   uint32_t parsed = 0;
   bool found = false;
   RefPtr<MediaRawData> sample;
   nsresult rv = NS_OK;
-  int64_t sampleTime;
 
   WEBM_DEBUG("TimeThreshold: %f", aTimeThreshold.ToSeconds());
   while (!found && NS_SUCCEEDED((rv = NextSample(sample)))) {
     parsed++;
-    sampleTime = sample->mTime;
-    if (sample->mKeyframe && sampleTime >= aTimeThreshold.ToMicroseconds()) {
+    if (sample->mKeyframe && sample->mTime >= aTimeThreshold) {
+      WEBM_DEBUG("next sample: %f (parsed: %d)",
+                 sample->mTime.ToSeconds(), parsed);
       found = true;
       mSamples.Reset();
       mSamples.PushFront(sample.forget());
     }
   }
   if (NS_SUCCEEDED(rv)) {
     SetNextKeyFrameTime();
   }
   if (found) {
-    WEBM_DEBUG("next sample: %f (parsed: %d)",
-               media::TimeUnit::FromMicroseconds(sampleTime).ToSeconds(),
-               parsed);
     return SkipAccessPointPromise::CreateAndResolve(parsed, __func__);
   } else {
     SkipFailureHolder failure(NS_ERROR_DOM_MEDIA_END_OF_STREAM, parsed);
     return SkipAccessPointPromise::CreateAndReject(Move(failure), __func__);
   }
 }
 
 media::TimeIntervals
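
The SetNextKeyFrameTime hunks above replace a -1 sentinel with TimeUnit's built-in invalid state. The difference is more than cosmetic: -1 is a representable (if nonsensical) int64_t timestamp, whereas the invalid state is explicit and checked with IsValid(). The pattern in isolation, with foundKeyframe and keyframeSample as illustrative names:

    auto frameTime = media::TimeUnit::Invalid();  // was: int64_t frameTime = -1;
    if (foundKeyframe) {
      frameTime = keyframeSample->mTime;
    }
    if (frameTime.IsValid()) {                    // was: frameTime != -1
      mNextKeyframeTime.emplace(frameTime);
    }
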
--- a/media/libstagefright/binding/Index.cpp
+++ b/media/libstagefright/binding/Index.cpp
@@ -96,17 +96,17 @@ already_AddRefed<MediaRawData> SampleIte
   mIndex->mSource->Length(&length);
   if (s->mByteRange.mEnd > length) {
     // We don't have this complete sample.
     return nullptr;
   }
 
   RefPtr<MediaRawData> sample = new MediaRawData();
   sample->mTimecode = TimeUnit::FromMicroseconds(s->mDecodeTime);
-  sample->mTime = s->mCompositionRange.start;
+  sample->mTime = TimeUnit::FromMicroseconds(s->mCompositionRange.start);
   sample->mDuration = TimeUnit::FromMicroseconds(s->mCompositionRange.Length());
   sample->mOffset = s->mByteRange.mStart;
   sample->mKeyframe = s->mSync;
 
   nsAutoPtr<MediaRawDataWriter> writer(sample->CreateWriter());
   // Do the blocking read
   if (!writer->SetSize(s->mByteRange.Length())) {
     return nullptr;