Bug 1356506 - change the type of MediaData::mTimecode to TimeUnit since int64_t is ambiguous.
author     JW Wang <jwwang@mozilla.com>
date       Fri, 14 Apr 2017 14:52:14 +0800
changeset  565532 c77c9ba75b45c73f0abf2565b445e93b6362aabe
parent     565531 20dff607fb88ee69135a280bbb7f32df75a86237
child      565604 56ccde5ab392b67a49b05729fa3db44229f06e71
child      565755 f12c139975172709b31345455a988d142319ce05
phase      draft
push id    54901
push user  jwwang@mozilla.com
push date  Thu, 20 Apr 2017 02:04:57 +0000
bugs       1356506
milestone  55.0a1
Bug 1356506 - change the type of MediaData::mTimecode to TimeUnit since int64_t is ambiguous.

MozReview-Commit-ID: 7dO5OOUuORz
dom/media/ADTSDemuxer.cpp
dom/media/MP3Demuxer.cpp
dom/media/MediaData.cpp
dom/media/MediaData.h
dom/media/MediaFormatReader.cpp
dom/media/flac/FlacDemuxer.cpp
dom/media/fmp4/MP4Demuxer.cpp
dom/media/gtest/TestMP4Demuxer.cpp
dom/media/ipc/VideoDecoderChild.cpp
dom/media/ipc/VideoDecoderParent.cpp
dom/media/mediasource/TrackBuffersManager.cpp
dom/media/ogg/OggCodecState.cpp
dom/media/platforms/agnostic/NullDecoderModule.cpp
dom/media/platforms/agnostic/TheoraDecoder.cpp
dom/media/platforms/agnostic/VPXDecoder.cpp
dom/media/platforms/agnostic/VorbisDecoder.cpp
dom/media/platforms/apple/AppleVTDecoder.cpp
dom/media/platforms/apple/AppleVTDecoder.h
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
dom/media/wave/WaveDemuxer.cpp
dom/media/webm/WebMDemuxer.cpp
media/libstagefright/binding/Index.cpp
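
The pattern applied throughout the patch: raw int64_t microsecond values now enter mTimecode through an explicit FromMicroseconds() conversion and leave through ToMicroseconds(), while arithmetic and comparisons stay in TimeUnit. A minimal before/after sketch, using only TimeUnit operations that appear in the hunks below (not a complete program):

    // Before: the unit is implicit; a reader must know that mTimecode
    // holds microseconds (or, for Ogg, a raw granulepos).
    int64_t mTimecode;
    frame->mTimecode = frame->mTime;

    // After: the type itself carries the unit, so conversions happen
    // explicitly at the boundaries, and arithmetic stays in TimeUnit.
    media::TimeUnit mTimecode;
    frame->mTimecode = media::TimeUnit::FromMicroseconds(frame->mTime);
    int64_t usecs = frame->mTimecode.ToMicroseconds();
    media::TimeUnit next = frame->mTimecode + frame->mDuration;
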
--- a/dom/media/ADTSDemuxer.cpp
+++ b/dom/media/ADTSDemuxer.cpp
@@ -749,17 +749,17 @@ ADTSTrackDemuxer::GetNextFrame(const adt
     ADTSLOG("GetNext() Exit read=%u frame->Size()=%" PRIuSIZE, read, frame->Size());
     return nullptr;
   }
 
   UpdateState(aFrame);
 
   frame->mTime = Duration(mFrameIndex - 1).ToMicroseconds();
   frame->mDuration = Duration(1);
-  frame->mTimecode = frame->mTime;
+  frame->mTimecode = media::TimeUnit::FromMicroseconds(frame->mTime);
   frame->mKeyframe = true;
 
   MOZ_ASSERT(frame->mTime >= 0);
   MOZ_ASSERT(frame->mDuration.IsPositive());
 
   ADTSLOGV("GetNext() End mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64
            " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64
            " mSamplesPerFrame=%d mSamplesPerSecond=%d mChannels=%d",
--- a/dom/media/MP3Demuxer.cpp
+++ b/dom/media/MP3Demuxer.cpp
@@ -601,17 +601,17 @@ MP3TrackDemuxer::GetNextFrame(const Medi
     MP3LOG("GetNext() Exit read=%u frame->Size()=%" PRIuSIZE, read, frame->Size());
     return nullptr;
   }
 
   UpdateState(aRange);
 
   frame->mTime = Duration(mFrameIndex - 1).ToMicroseconds();
   frame->mDuration = Duration(1);
-  frame->mTimecode = frame->mTime;
+  frame->mTimecode = media::TimeUnit::FromMicroseconds(frame->mTime);
   frame->mKeyframe = true;
 
   MOZ_ASSERT(frame->mTime >= 0);
   MOZ_ASSERT(frame->mDuration.IsPositive());
 
   if (mNumParsedFrames == 1) {
     // First frame parsed, let's read VBR info if available.
     ByteReader reader(frame->Data(), frame->Size());
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -171,17 +171,17 @@ VideoData::VideoData(int64_t aOffset,
                      layers::ImageContainer::FrameID aFrameID)
   : MediaData(VIDEO_DATA, aOffset, aTime, aDuration, 1)
   , mDisplay(aDisplay)
   , mFrameID(aFrameID)
   , mSentToCompositor(false)
 {
   MOZ_ASSERT(!mDuration.IsNegative(), "Frame must have non-negative duration.");
   mKeyframe = aKeyframe;
-  mTimecode = aTimecode;
+  mTimecode = TimeUnit::FromMicroseconds(aTimecode);
 }
 
 VideoData::~VideoData()
 {
 }
 
 void
 VideoData::SetListener(UniquePtr<Listener> aListener)
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -290,17 +290,17 @@ public:
   MediaData(Type aType,
             int64_t aOffset,
             int64_t aTimestamp,
             int64_t aDuration,
             uint32_t aFrames)
     : mType(aType)
     , mOffset(aOffset)
     , mTime(aTimestamp)
-    , mTimecode(aTimestamp)
+    , mTimecode(media::TimeUnit::FromMicroseconds(aTimestamp))
     , mDuration(media::TimeUnit::FromMicroseconds(aDuration))
     , mFrames(aFrames)
     , mKeyframe(false)
   {
   }
 
   // Type of contained data.
   const Type mType;
@@ -308,17 +308,17 @@ public:
   // Approximate byte offset where this data was demuxed from its media.
   int64_t mOffset;
 
   // Start time of sample, in microseconds.
   int64_t mTime;
 
   // Codec specific internal time code. For Ogg based codecs this is the
   // granulepos.
-  int64_t mTimecode;
+  media::TimeUnit mTimecode;
 
   // Duration of sample, in microseconds.
   media::TimeUnit mDuration;
 
   // Amount of frames for contained data.
   const uint32_t mFrames;
 
   bool mKeyframe;
@@ -348,17 +348,16 @@ public:
     return static_cast<ReturnType*>(this);
   }
 
 protected:
   MediaData(Type aType, uint32_t aFrames)
     : mType(aType)
     , mOffset(0)
     , mTime(0)
-    , mTimecode(0)
     , mFrames(aFrames)
     , mKeyframe(false)
   {
   }
 
   virtual ~MediaData() { }
 
 };
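
Note that the protected MediaData constructor above now omits mTimecode from its initializer list entirely; this assumes a default-constructed media::TimeUnit is zero-valued, which is what the removed explicit mTimecode(0) initializer implies.
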
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -2018,17 +2018,17 @@ MediaFormatReader::HandleDemuxedSamples(
         LOG("Stream change occurred on a non-keyframe. Seeking to:%" PRId64,
             sample->mTime);
         InternalSeek(aTrack, seekTarget);
       }
       return;
     }
 
     LOGV("Input:%" PRId64 " (dts:%" PRId64 " kf:%d)",
-         sample->mTime, sample->mTimecode, sample->mKeyframe);
+         sample->mTime, sample->mTimecode.ToMicroseconds(), sample->mKeyframe);
     decoder.mNumSamplesInput++;
     decoder.mSizeOfQueue++;
     if (aTrack == TrackInfo::kVideoTrack) {
       aA.mStats.mParsedFrames++;
     }
 
     DecodeDemuxedSamples(aTrack, sample);
 
--- a/dom/media/flac/FlacDemuxer.cpp
+++ b/dom/media/flac/FlacDemuxer.cpp
@@ -977,17 +977,17 @@ FlacTrackDemuxer::GetNextFrame(const fla
   const uint32_t read = Read(frameWriter->Data(), offset, size);
   if (read != size) {
     LOG("GetNextFrame() Exit read=%u frame->Size=%" PRIuSIZE, read, frame->Size());
     return nullptr;
   }
 
   frame->mTime = aFrame.Time().ToMicroseconds();
   frame->mDuration = aFrame.Duration();
-  frame->mTimecode = frame->mTime;
+  frame->mTimecode = TimeUnit::FromMicroseconds(frame->mTime);
   frame->mOffset = aFrame.Offset();
   frame->mKeyframe = true;
 
   MOZ_ASSERT(frame->mTime >= 0);
   MOZ_ASSERT(!frame->mDuration.IsNegative());
 
   return frame.forget();
 }
--- a/dom/media/fmp4/MP4Demuxer.cpp
+++ b/dom/media/fmp4/MP4Demuxer.cpp
@@ -458,28 +458,28 @@ MP4TrackDemuxer::GetNextSample()
         {
           bool keyframe = type == mp4_demuxer::H264::FrameType::I_FRAME;
           if (sample->mKeyframe != keyframe) {
             NS_WARNING(nsPrintfCString("Frame incorrectly marked as %skeyframe "
                                        "@ pts:%" PRId64 " dur:%" PRId64
                                        " dts:%" PRId64,
                                        keyframe ? "" : "non-", sample->mTime,
                                        sample->mDuration.ToMicroseconds(),
-                                       sample->mTimecode)
+                                       sample->mTimecode.ToMicroseconds())
                          .get());
             sample->mKeyframe = keyframe;
           }
           break;
         }
         case mp4_demuxer::H264::FrameType::INVALID:
           NS_WARNING(
             nsPrintfCString("Invalid H264 frame @ pts:%" PRId64 " dur:%" PRId64
                             " dts:%" PRId64,
                             sample->mTime, sample->mDuration.ToMicroseconds(),
-                            sample->mTimecode)
+                            sample->mTimecode.ToMicroseconds())
               .get());
           // We could reject the sample now, however demuxer errors are fatal.
           // So we keep the invalid frame, relying on the H264 decoder to
           // handle the error later.
           // TODO: make demuxer errors non-fatal.
           break;
       }
     }
--- a/dom/media/gtest/TestMP4Demuxer.cpp
+++ b/dom/media/gtest/TestMP4Demuxer.cpp
@@ -120,17 +120,17 @@ public:
             }
           },
           [binding] (const MediaResult& aError) {
             if (aError == NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
               EXPECT_TRUE(binding->mSamples.Length() > 1);
               for (uint32_t i = 0; i < (binding->mSamples.Length() - 1); i++) {
                 EXPECT_LT(binding->mSamples[i]->mTimecode, binding->mSamples[i + 1]->mTimecode);
                 if (binding->mSamples[i]->mKeyframe) {
-                  binding->mKeyFrameTimecodes.AppendElement(binding->mSamples[i]->mTimecode);
+                  binding->mKeyFrameTimecodes.AppendElement(binding->mSamples[i]->mTimecode.ToMicroseconds());
                 }
               }
               binding->mCheckTrackSamples.Resolve(true, __func__);
             } else {
               EXPECT_TRUE(false);
               binding->mCheckTrackSamples.Reject(aError, __func__);
             }
           }
--- a/dom/media/ipc/VideoDecoderChild.cpp
+++ b/dom/media/ipc/VideoDecoderChild.cpp
@@ -226,17 +226,17 @@ VideoDecoderChild::Decode(MediaRawData* 
     return MediaDataDecoder::DecodePromise::CreateAndReject(
       NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__);
   }
 
   memcpy(buffer.get<uint8_t>(), aSample->Data(), aSample->Size());
 
   MediaRawDataIPDL sample(MediaDataIPDL(aSample->mOffset,
                                         aSample->mTime,
-                                        aSample->mTimecode,
+                                        aSample->mTimecode.ToMicroseconds(),
                                         aSample->mDuration.ToMicroseconds(),
                                         aSample->mFrames,
                                         aSample->mKeyframe),
                           buffer);
   SendInput(sample);
   return mDecodePromise.Ensure(__func__);
 }
 
--- a/dom/media/ipc/VideoDecoderParent.cpp
+++ b/dom/media/ipc/VideoDecoderParent.cpp
@@ -133,17 +133,17 @@ VideoDecoderParent::RecvInput(const Medi
                                                aData.buffer().Size<uint8_t>());
   if (aData.buffer().Size<uint8_t>() && !data->Data()) {
     // OOM
     Error(NS_ERROR_OUT_OF_MEMORY);
     return IPC_OK();
   }
   data->mOffset = aData.base().offset();
   data->mTime = aData.base().time();
-  data->mTimecode = aData.base().timecode();
+  data->mTimecode = media::TimeUnit::FromMicroseconds(aData.base().timecode());
   data->mDuration = media::TimeUnit::FromMicroseconds(aData.base().duration());
   data->mKeyframe = aData.base().keyframe();
 
   DeallocShmem(aData.buffer());
 
   RefPtr<VideoDecoderParent> self = this;
   mDecoder->Decode(data)->Then(
     mManagerTaskQueue, __func__,
@@ -186,17 +186,17 @@ VideoDecoderParent::ProcessDecodedData(
     }
 
     if (texture && !texture->IsAddedToCompositableClient()) {
       texture->InitIPDLActor(mKnowsCompositor);
       texture->SetAddedToCompositableClient();
     }
 
     VideoDataIPDL output(
-      MediaDataIPDL(data->mOffset, data->mTime, data->mTimecode,
+      MediaDataIPDL(data->mOffset, data->mTime, data->mTimecode.ToMicroseconds(),
                     data->mDuration.ToMicroseconds(),
                     data->mFrames, data->mKeyframe),
       video->mDisplay,
       texture ? texture->GetSize() : IntSize(),
       texture ? mParent->StoreImage(video->mImage, texture)
               : SurfaceDescriptorGPUVideo(0),
       video->mFrameID);
     Unused << SendOutput(output);
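
Worth noting for the two IPC files above: the MediaDataIPDL timecode field stays a plain integer, so the TimeUnit is flattened to microseconds on send (mTimecode.ToMicroseconds()) and rebuilt on receive (TimeUnit::FromMicroseconds(...)). Only the in-process representation changes; the wire format does not.
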
--- a/dom/media/mediasource/TrackBuffersManager.cpp
+++ b/dom/media/mediasource/TrackBuffersManager.cpp
@@ -1485,17 +1485,17 @@ TrackBuffersManager::ProcessFrames(Track
   }
 
   for (auto& sample : aSamples) {
     SAMPLE_DEBUG("Processing %s frame(pts:%" PRId64 " end:%" PRId64 ", dts:%" PRId64 ", duration:%" PRId64 ", "
                "kf:%d)",
                aTrackData.mInfo->mMimeType.get(),
                sample->mTime,
                sample->GetEndTime().ToMicroseconds(),
-               sample->mTimecode,
+               sample->mTimecode.ToMicroseconds(),
                sample->mDuration.ToMicroseconds(),
                sample->mKeyframe);
 
     const TimeUnit sampleEndTime = sample->GetEndTime();
     if (sampleEndTime > aTrackData.mLastParsedEndTime) {
       aTrackData.mLastParsedEndTime = sampleEndTime;
     }
 
@@ -1520,17 +1520,17 @@ TrackBuffersManager::ProcessFrames(Track
     //   Let presentation timestamp be a double precision floating point representation of the coded frame's presentation timestamp in seconds.
     //   Let decode timestamp be a double precision floating point representation of the coded frame's decode timestamp in seconds.
 
     // 2. Let frame duration be a double precision floating point representation of the coded frame's duration in seconds.
     // Step 3 is performed earlier or when a discontinuity has been detected.
     // 4. If timestampOffset is not 0, then run the following steps:
 
     TimeUnit sampleTime = TimeUnit::FromMicroseconds(sample->mTime);
-    TimeUnit sampleTimecode = TimeUnit::FromMicroseconds(sample->mTimecode);
+    TimeUnit sampleTimecode = sample->mTimecode;
     TimeUnit sampleDuration = sample->mDuration;
     TimeUnit timestampOffset = mSourceBufferAttributes->GetTimestampOffset();
 
     TimeInterval sampleInterval =
       mSourceBufferAttributes->mGenerateTimestamps
       ? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
       : TimeInterval(timestampOffset + sampleTime,
                      timestampOffset + sampleTime + sampleDuration);
@@ -1614,17 +1614,17 @@ TrackBuffersManager::ProcessFrames(Track
       trackBuffer.mNeedRandomAccessPoint = true;
       needDiscontinuityCheck = true;
       continue;
     }
 
     samplesRange += sampleInterval;
     sizeNewSamples += sample->ComputedSizeOfIncludingThis();
     sample->mTime = sampleInterval.mStart.ToMicroseconds();
-    sample->mTimecode = decodeTimestamp.ToMicroseconds();
+    sample->mTimecode = decodeTimestamp;
     sample->mTrackInfo = trackBuffer.mLastInfo;
     samples.AppendElement(sample);
 
     // Steps 11,12,13,14, 15 and 16 will be done in one block in InsertFrames.
 
     trackBuffer.mLongestFrameDuration =
       trackBuffer.mLastFrameDuration.isSome()
       ? sample->mKeyframe
@@ -2165,17 +2165,17 @@ TrackBuffersManager::Seek(TrackInfo::Tra
   uint32_t lastKeyFrameIndex = 0;
   for (; i < track.Length(); i++) {
     const RefPtr<MediaRawData>& sample = track[i];
     TimeUnit sampleTime = TimeUnit::FromMicroseconds(sample->mTime);
     if (sampleTime > aTime && lastKeyFrameTime.isSome()) {
       break;
     }
     if (sample->mKeyframe) {
-      lastKeyFrameTimecode = TimeUnit::FromMicroseconds(sample->mTimecode);
+      lastKeyFrameTimecode = sample->mTimecode;
       lastKeyFrameTime = Some(sampleTime);
       lastKeyFrameIndex = i;
     }
     if (sampleTime == aTime ||
         (sampleTime > aTime && lastKeyFrameTime.isSome())) {
       break;
     }
   }
@@ -2237,39 +2237,36 @@ TrackBuffersManager::SkipToNextRandomAcc
     if (!sample) {
       break;
     }
     if (sample->mKeyframe &&
         sample->mTime >= aTimeThreadshold.ToMicroseconds()) {
       aFound = true;
       break;
     }
-    nextSampleTimecode =
-      TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration;
+    nextSampleTimecode = sample->mTimecode + sample->mDuration;
     nextSampleTime = sample->GetEndTime();
     parsed++;
   }
 
   // Adjust the next demux time and index so that the next call to
   // SkipToNextRandomAccessPoint will not count again the parsed sample as
   // skipped.
   if (aFound) {
-    trackData.mNextSampleTimecode =
-       TimeUnit::FromMicroseconds(track[i]->mTimecode);
+    trackData.mNextSampleTimecode = track[i]->mTimecode;
     trackData.mNextSampleTime =
        TimeUnit::FromMicroseconds(track[i]->mTime);
     trackData.mNextGetSampleIndex = Some(i);
   } else if (i > 0) {
     // Go back to the previous keyframe or the original position so the next
     // demux can succeed and be decoded.
     for (int j = i - 1; j >= originalPos; j--) {
       const RefPtr<MediaRawData>& sample = track[j];
       if (sample->mKeyframe) {
-        trackData.mNextSampleTimecode =
-          TimeUnit::FromMicroseconds(sample->mTimecode);
+        trackData.mNextSampleTimecode = sample->mTimecode;
         trackData.mNextSampleTime = TimeUnit::FromMicroseconds(sample->mTime);
         trackData.mNextGetSampleIndex = Some(uint32_t(j));
         // We are unable to skip to a keyframe past aTimeThreshold, however
         // we are speeding up decoding by dropping the unplayable frames.
         // So we can mark aFound as true.
         aFound = true;
         break;
       }
@@ -2295,17 +2292,17 @@ TrackBuffersManager::GetSample(TrackInfo
   const TrackBuffer& track = GetTrackBuffer(aTrack);
 
   if (aIndex >= track.Length()) {
     // reached the end.
     return nullptr;
   }
 
   const RefPtr<MediaRawData>& sample = track[aIndex];
-  if (!aIndex || sample->mTimecode <= (aExpectedDts + aFuzz).ToMicroseconds() ||
+  if (!aIndex || sample->mTimecode <= aExpectedDts + aFuzz ||
       sample->mTime <= (aExpectedPts + aFuzz).ToMicroseconds()) {
     return sample;
   }
 
   // Gap is too big. End of Stream or Waiting for Data.
   // TODO, check that we have continuous data based on the sanitized buffered
   // range instead.
   return nullptr;
@@ -2353,43 +2350,40 @@ TrackBuffersManager::GetSample(TrackInfo
       aResult = MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
       return nullptr;
     }
     if (p->mKeyframe) {
       UpdateEvictionIndex(trackData, trackData.mNextGetSampleIndex.ref());
     }
     trackData.mNextGetSampleIndex.ref()++;
     // Estimate decode timestamp and timestamp of the next sample.
-    TimeUnit nextSampleTimecode =
-      TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration;
+    TimeUnit nextSampleTimecode = sample->mTimecode + sample->mDuration;
     TimeUnit nextSampleTime = sample->GetEndTime();
     const MediaRawData* nextSample =
       GetSample(aTrack,
                 trackData.mNextGetSampleIndex.ref(),
                 nextSampleTimecode,
                 nextSampleTime,
                 aFuzz);
     if (nextSample) {
       // We have a valid next sample, can use exact values.
-      trackData.mNextSampleTimecode =
-        TimeUnit::FromMicroseconds(nextSample->mTimecode);
+      trackData.mNextSampleTimecode = nextSample->mTimecode;
       trackData.mNextSampleTime =
         TimeUnit::FromMicroseconds(nextSample->mTime);
     } else {
       // Next sample isn't available yet. Use estimates.
       trackData.mNextSampleTimecode = nextSampleTimecode;
       trackData.mNextSampleTime = nextSampleTime;
     }
     aResult = NS_OK;
     return p.forget();
   }
 
   if (trackData.mNextSampleTimecode >
-      TimeUnit::FromMicroseconds(track.LastElement()->mTimecode)
-       + track.LastElement()->mDuration) {
+      track.LastElement()->mTimecode + track.LastElement()->mDuration) {
     // The next element is past our last sample. We're done.
     trackData.mNextGetSampleIndex = Some(uint32_t(track.Length()));
     aResult = NS_ERROR_DOM_MEDIA_END_OF_STREAM;
     return nullptr;
   }
 
   // Our previous index has been overwritten, attempt to find the new one.
   int32_t pos = FindCurrentPosition(aTrack, aFuzz);
@@ -2410,18 +2404,17 @@ TrackBuffersManager::GetSample(TrackInfo
 
   // Find the previous keyframe to calculate the evictable amount.
   int32_t i = pos;
   for (; !track[i]->mKeyframe; i--) {
   }
   UpdateEvictionIndex(trackData, i);
 
   trackData.mNextGetSampleIndex = Some(uint32_t(pos)+1);
-  trackData.mNextSampleTimecode =
-    TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration;
+  trackData.mNextSampleTimecode = sample->mTimecode + sample->mDuration;
   trackData.mNextSampleTime = sample->GetEndTime();
   aResult = NS_OK;
   return p.forget();
 }
 
 int32_t
 TrackBuffersManager::FindCurrentPosition(TrackInfo::TrackType aTrack,
                                          const TimeUnit& aFuzz) const
@@ -2429,34 +2422,34 @@ TrackBuffersManager::FindCurrentPosition
   MOZ_ASSERT(OnTaskQueue());
   auto& trackData = GetTracksData(aTrack);
   const TrackBuffer& track = GetTrackBuffer(aTrack);
 
   // Perform an exact search first.
   for (uint32_t i = 0; i < track.Length(); i++) {
     const RefPtr<MediaRawData>& sample = track[i];
     TimeInterval sampleInterval{
-      TimeUnit::FromMicroseconds(sample->mTimecode),
-      TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration};
+      sample->mTimecode,
+      sample->mTimecode + sample->mDuration};
 
     if (sampleInterval.ContainsStrict(trackData.mNextSampleTimecode)) {
       return i;
     }
     if (sampleInterval.mStart > trackData.mNextSampleTimecode) {
       // Samples are ordered by timecode. There's no need to search
       // any further.
       break;
     }
   }
 
   for (uint32_t i = 0; i < track.Length(); i++) {
     const RefPtr<MediaRawData>& sample = track[i];
     TimeInterval sampleInterval{
-      TimeUnit::FromMicroseconds(sample->mTimecode),
-      TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration,
+      sample->mTimecode,
+      sample->mTimecode + sample->mDuration,
       aFuzz};
 
     if (sampleInterval.ContainsWithStrictEnd(trackData.mNextSampleTimecode)) {
       return i;
     }
     if (sampleInterval.mStart - aFuzz > trackData.mNextSampleTimecode) {
       // Samples are ordered by timecode. There's no need to search
       // any further.
@@ -2506,18 +2499,17 @@ TrackBuffersManager::GetNextRandomAccess
     const MediaRawData* sample =
       GetSample(aTrack, i, nextSampleTimecode, nextSampleTime, aFuzz);
     if (!sample) {
       break;
     }
     if (sample->mKeyframe) {
       return TimeUnit::FromMicroseconds(sample->mTime);
     }
-    nextSampleTimecode =
-      TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration;
+    nextSampleTimecode = sample->mTimecode + sample->mDuration;
     nextSampleTime = sample->GetEndTime();
   }
   return TimeUnit::FromInfinity();
 }
 
 void
 TrackBuffersManager::TrackData::AddSizeOfResources(MediaSourceDecoder::ResourceSizes* aSizes) const
 {
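
The net effect in TrackBuffersManager: every FromMicroseconds/ToMicroseconds round-trip around mTimecode disappears, because TimeUnit already supports the arithmetic and comparisons this file needs. A sketch of the simplification, with types as used in the hunks above:

    // Round-trip before the change:
    TimeUnit next =
      TimeUnit::FromMicroseconds(sample->mTimecode) + sample->mDuration;

    // Direct arithmetic and comparison after it:
    TimeUnit next = sample->mTimecode + sample->mDuration;
    bool close = sample->mTimecode <= aExpectedDts + aFuzz;
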
--- a/dom/media/ogg/OggCodecState.cpp
+++ b/dom/media/ogg/OggCodecState.cpp
@@ -254,17 +254,17 @@ OggCodecState::PacketOutAsMediaRawData()
   }
 
   int64_t end_tstamp = Time(packet->granulepos);
   NS_ASSERTION(end_tstamp >= 0, "timestamp invalid");
 
   int64_t duration = PacketDuration(packet.get());
   NS_ASSERTION(duration >= 0, "duration invalid");
 
-  sample->mTimecode = packet->granulepos;
+  sample->mTimecode = media::TimeUnit::FromMicroseconds(packet->granulepos);
   sample->mTime = end_tstamp - duration;
   sample->mDuration = media::TimeUnit::FromMicroseconds(duration);
   sample->mKeyframe = IsKeyframe(packet.get());
   sample->mEOS = packet->e_o_s;
 
   return sample.forget();
 }
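
One subtlety in OggCodecState: as the MediaData.h comment says, for Ogg mTimecode holds the granulepos, a codec-specific counter rather than a microsecond count. The patch wraps the raw value without converting it, so TimeUnit serves as an opaque carrier here and ToMicroseconds() hands the original granulepos back:

    // granulepos in, granulepos out; no real unit conversion occurs.
    sample->mTimecode = media::TimeUnit::FromMicroseconds(packet->granulepos);
    int64_t granulepos = sample->mTimecode.ToMicroseconds();
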
 
--- a/dom/media/platforms/agnostic/NullDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/NullDecoderModule.cpp
@@ -15,17 +15,17 @@ public:
   already_AddRefed<MediaData> Create(MediaRawData* aSample) override
   {
     // Create a dummy VideoData with no image. This gives us something to
     // send to media streams if necessary.
     RefPtr<VideoData> v(new VideoData(aSample->mOffset,
                                       aSample->mTime,
                                       aSample->mDuration.ToMicroseconds(),
                                       aSample->mKeyframe,
-                                      aSample->mTimecode,
+                                      aSample->mTimecode.ToMicroseconds(),
                                       gfx::IntSize(),
                                       0));
     return v.forget();
   }
 };
 
 class NullDecoderModule : public PlatformDecoderModule {
 public:
--- a/dom/media/platforms/agnostic/TheoraDecoder.cpp
+++ b/dom/media/platforms/agnostic/TheoraDecoder.cpp
@@ -128,17 +128,18 @@ TheoraDecoder::ProcessDecode(MediaRawDat
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
   const unsigned char* aData = aSample->Data();
   size_t aLength = aSample->Size();
 
   bool bos = mPacketCount == 0;
   ogg_packet pkt = InitTheoraPacket(
-    aData, aLength, bos, false, aSample->mTimecode, mPacketCount++);
+    aData, aLength, bos, false,
+    aSample->mTimecode.ToMicroseconds(), mPacketCount++);
 
   int ret = th_decode_packetin(mTheoraDecoderContext, &pkt, nullptr);
   if (ret == 0 || ret == TH_DUPFRAME) {
     th_ycbcr_buffer ycbcr;
     th_decode_ycbcr_out(mTheoraDecoderContext, ycbcr);
 
     int hdec = !(mTheoraInfo.pixel_fmt & 1);
     int vdec = !(mTheoraInfo.pixel_fmt & 2);
@@ -170,17 +171,17 @@ TheoraDecoder::ProcessDecode(MediaRawDat
     RefPtr<VideoData> v =
       VideoData::CreateAndCopyData(info,
                                    mImageContainer,
                                    aSample->mOffset,
                                    aSample->mTime,
                                    aSample->mDuration,
                                    b,
                                    aSample->mKeyframe,
-                                   aSample->mTimecode,
+                                   aSample->mTimecode.ToMicroseconds(),
                                    mInfo.ScaledImageRect(mTheoraInfo.frame_width,
                                                          mTheoraInfo.frame_height));
     if (!v) {
       LOG(
         "Image allocation error source %ux%u display %ux%u picture %ux%u",
         mTheoraInfo.frame_width,
         mTheoraInfo.frame_height,
         mInfo.mDisplay.width,
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -206,17 +206,17 @@ VPXDecoder::ProcessDecode(MediaRawData* 
     if (!img_alpha) {
       v = VideoData::CreateAndCopyData(mInfo,
                                        mImageContainer,
                                        aSample->mOffset,
                                        aSample->mTime,
                                        aSample->mDuration,
                                        b,
                                        aSample->mKeyframe,
-                                       aSample->mTimecode,
+                                       aSample->mTimecode.ToMicroseconds(),
                                        mInfo.ScaledImageRect(img->d_w,
                                                              img->d_h));
     } else {
       VideoData::YCbCrBuffer::Plane alpha_plane;
       alpha_plane.mData = img_alpha->planes[0];
       alpha_plane.mStride = img_alpha->stride[0];
       alpha_plane.mHeight = img_alpha->d_h;
       alpha_plane.mWidth = img_alpha->d_w;
@@ -224,17 +224,17 @@ VPXDecoder::ProcessDecode(MediaRawData* 
       v = VideoData::CreateAndCopyData(mInfo,
                                        mImageContainer,
                                        aSample->mOffset,
                                        aSample->mTime,
                                        aSample->mDuration,
                                        b,
                                        alpha_plane,
                                        aSample->mKeyframe,
-                                       aSample->mTimecode,
+                                       aSample->mTimecode.ToMicroseconds(),
                                        mInfo.ScaledImageRect(img->d_w,
                                                              img->d_h));
 
     }
 
     if (!v) {
       LOG(
         "Image allocation error source %ux%u display %ux%u picture %ux%u",
--- a/dom/media/platforms/agnostic/VorbisDecoder.cpp
+++ b/dom/media/platforms/agnostic/VorbisDecoder.cpp
@@ -147,18 +147,19 @@ VorbisDataDecoder::ProcessDecode(MediaRa
   MOZ_ASSERT(mPacketCount >= 3);
 
   if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
     // We are starting a new block.
     mFrames = 0;
     mLastFrameTime = Some(aSample->mTime);
   }
 
-  ogg_packet pkt = InitVorbisPacket(aData, aLength, false, aSample->mEOS,
-                                    aSample->mTimecode, mPacketCount++);
+  ogg_packet pkt = InitVorbisPacket(
+    aData, aLength, false, aSample->mEOS,
+    aSample->mTimecode.ToMicroseconds(), mPacketCount++);
 
   int err = vorbis_synthesis(&mVorbisBlock, &pkt);
   if (err) {
     return DecodePromise::CreateAndReject(
       MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                   RESULT_DETAIL("vorbis_synthesis:%d", err)),
       __func__);
   }
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -129,17 +129,17 @@ TimingInfoFromSample(MediaRawData* aSamp
 {
   CMSampleTimingInfo timestamp;
 
   timestamp.duration = CMTimeMake(
     aSample->mDuration.ToMicroseconds(), USECS_PER_S);
   timestamp.presentationTimeStamp =
     CMTimeMake(aSample->mTime, USECS_PER_S);
   timestamp.decodeTimeStamp =
-    CMTimeMake(aSample->mTimecode, USECS_PER_S);
+    CMTimeMake(aSample->mTimecode.ToMicroseconds(), USECS_PER_S);
 
   return timestamp;
 }
 
 void
 AppleVTDecoder::ProcessDecode(MediaRawData* aSample)
 {
   AssertOnTaskQueueThread();
--- a/dom/media/platforms/apple/AppleVTDecoder.h
+++ b/dom/media/platforms/apple/AppleVTDecoder.h
@@ -27,17 +27,17 @@ public:
   public:
     media::TimeUnit decode_timestamp;
     media::TimeUnit composition_timestamp;
     media::TimeUnit duration;
     int64_t byte_offset;
     bool is_sync_point;
 
     explicit AppleFrameRef(const MediaRawData& aSample)
-      : decode_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTimecode))
+      : decode_timestamp(aSample.mTimecode)
       , composition_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTime))
       , duration(aSample.mDuration)
       , byte_offset(aSample.mOffset)
       , is_sync_point(aSample.mKeyframe)
     {
     }
   };
 
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -192,17 +192,17 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
       || mCodecID == AV_CODEC_ID_VP9
 #endif
       )) {
     while (inputSize) {
       uint8_t* data;
       int size;
       int len = mLib->av_parser_parse2(
         mCodecParser, mCodecContext, &data, &size, inputData, inputSize,
-        aSample->mTime, aSample->mTimecode, aSample->mOffset);
+        aSample->mTime, aSample->mTimecode.ToMicroseconds(), aSample->mOffset);
       if (size_t(len) > inputSize) {
         return NS_ERROR_DOM_MEDIA_DECODE_ERR;
       }
       inputData += len;
       inputSize -= len;
       if (size) {
         bool gotFrame = false;
         MediaResult rv = DoDecode(aSample, data, size, &gotFrame, aResults);
@@ -226,27 +226,28 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
                                         bool* aGotFrame,
                                         MediaDataDecoder::DecodedData& aResults)
 {
   AVPacket packet;
   mLib->av_init_packet(&packet);
 
   packet.data = aData;
   packet.size = aSize;
-  packet.dts = mLastInputDts = aSample->mTimecode;
+  packet.dts = mLastInputDts = aSample->mTimecode.ToMicroseconds();
   packet.pts = aSample->mTime;
   packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
   packet.pos = aSample->mOffset;
 
   // LibAV provides no API to retrieve the decoded sample's duration.
   // (FFmpeg >= 1.0 provides av_frame_get_pkt_duration)
   // As such we instead use a map using the dts as key that we will retrieve
   // later.
   // The map will have a typical size of 16 entry.
-  mDurationMap.Insert(aSample->mTimecode, aSample->mDuration.ToMicroseconds());
+  mDurationMap.Insert(
+    aSample->mTimecode.ToMicroseconds(), aSample->mDuration.ToMicroseconds());
 
   if (!PrepareFrame()) {
     NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
     return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
   }
 
   // Required with old version of FFmpeg/LibAV
   mFrame->reordered_opaque = AV_NOPTS_VALUE;
@@ -357,17 +358,17 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
   }
   return NS_OK;
 }
 
 RefPtr<MediaDataDecoder::DecodePromise>
 FFmpegVideoDecoder<LIBAV_VER>::ProcessDrain()
 {
   RefPtr<MediaRawData> empty(new MediaRawData());
-  empty->mTimecode = mLastInputDts;
+  empty->mTimecode = media::TimeUnit::FromMicroseconds(mLastInputDts);
   bool gotFrame = false;
   DecodedData results;
   while (NS_SUCCEEDED(DoDecode(empty, &gotFrame, results)) && gotFrame) {
   }
   return DecodePromise::CreateAndResolve(Move(results), __func__);
 }
 
 RefPtr<MediaDataDecoder::FlushPromise>
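
The duration bookkeeping above survives the type change by keying the map on the dts in microseconds, since libav offers no way to get a per-frame duration back out of the decoder. A hypothetical sketch of that pattern using a plain std::map (the real DurationMap API may differ, and outDts is illustrative):

    #include <map>

    std::map<int64_t, int64_t> durations; // dts (usecs) -> duration (usecs)

    // Before decode: remember the input sample's duration under its dts.
    durations[aSample->mTimecode.ToMicroseconds()] =
        aSample->mDuration.ToMicroseconds();

    // After decode: recover the duration by the output frame's dts.
    auto it = durations.find(outDts);
    if (it != durations.end()) {
      int64_t durationUsecs = it->second;
      durations.erase(it);
    }
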
--- a/dom/media/wave/WaveDemuxer.cpp
+++ b/dom/media/wave/WaveDemuxer.cpp
@@ -535,17 +535,17 @@ WAVTrackDemuxer::GetNextChunk(const Medi
 
   if (static_cast<uint32_t>(mChunkIndex) * DATA_CHUNK_SIZE < mDataLength) {
     datachunk->mDuration = Duration(1);
   } else {
     uint32_t mBytesRemaining =
       mDataLength - mChunkIndex * DATA_CHUNK_SIZE;
     datachunk->mDuration = DurationFromBytes(mBytesRemaining);
   }
-  datachunk->mTimecode = datachunk->mTime;
+  datachunk->mTimecode = media::TimeUnit::FromMicroseconds(datachunk->mTime);
   datachunk->mKeyframe = true;
 
   MOZ_ASSERT(datachunk->mTime >= 0);
   MOZ_ASSERT(!datachunk->mDuration.IsNegative());
 
   return datachunk.forget();
 }
 
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -717,17 +717,17 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
       }
     } else {
       sample = new MediaRawData(data, length);
       if (length && !sample->Data()) {
         // OOM.
         return NS_ERROR_OUT_OF_MEMORY;
       }
     }
-    sample->mTimecode = tstamp;
+    sample->mTimecode = media::TimeUnit::FromMicroseconds(tstamp);
     sample->mTime = tstamp;
     sample->mDuration = media::TimeUnit::FromMicroseconds(next_tstamp - tstamp);
     sample->mOffset = holder->Offset();
     sample->mKeyframe = isKeyframe;
     if (discardPadding && i == count - 1) {
       CheckedInt64 discardFrames;
       if (discardPadding < 0) {
         // This is an invalid value as discard padding should never be negative.
@@ -1152,27 +1152,27 @@ WebMTrackDemuxer::SetNextKeyFrameTime()
       frameTime = sample->mTime;
       foundKeyframe = true;
     }
     skipSamplesQueue.Push(sample.forget());
   }
   Maybe<int64_t> startTime;
   if (skipSamplesQueue.GetSize()) {
     const RefPtr<MediaRawData>& sample = skipSamplesQueue.First();
-    startTime.emplace(sample->mTimecode);
+    startTime.emplace(sample->mTimecode.ToMicroseconds());
   }
   // Demux and buffer frames until we find a keyframe.
   RefPtr<MediaRawData> sample;
   nsresult rv = NS_OK;
   while (!foundKeyframe && NS_SUCCEEDED((rv = NextSample(sample)))) {
     if (sample->mKeyframe) {
       frameTime = sample->mTime;
       foundKeyframe = true;
     }
-    int64_t sampleTimecode = sample->mTimecode;
+    int64_t sampleTimecode = sample->mTimecode.ToMicroseconds();
     skipSamplesQueue.Push(sample.forget());
     if (!startTime) {
       startTime.emplace(sampleTimecode);
     } else if (!foundKeyframe
                && sampleTimecode > startTime.ref() + MAX_LOOK_AHEAD) {
       WEBM_DEBUG("Couldn't find keyframe in a reasonable time, aborting");
       break;
     }
@@ -1181,19 +1181,17 @@ WebMTrackDemuxer::SetNextKeyFrameTime()
   // in the right order.
   mSamples.PushFront(Move(skipSamplesQueue));
 
   if (frameTime != -1) {
     mNextKeyframeTime.emplace(media::TimeUnit::FromMicroseconds(frameTime));
     WEBM_DEBUG("Next Keyframe %f (%u queued %.02fs)",
                mNextKeyframeTime.value().ToSeconds(),
                uint32_t(mSamples.GetSize()),
-               media::TimeUnit::FromMicroseconds(mSamples.Last()->mTimecode
-                                                 - mSamples.First()->mTimecode)
-                 .ToSeconds());
+               (mSamples.Last()->mTimecode - mSamples.First()->mTimecode)
+                 .ToSeconds());
   } else {
     WEBM_DEBUG("Couldn't determine next keyframe time  (%u queued)",
                uint32_t(mSamples.GetSize()));
   }
 }
 
 void
 WebMTrackDemuxer::Reset()
--- a/media/libstagefright/binding/Index.cpp
+++ b/media/libstagefright/binding/Index.cpp
@@ -95,17 +95,17 @@ already_AddRefed<MediaRawData> SampleIte
   int64_t length = std::numeric_limits<int64_t>::max();
   mIndex->mSource->Length(&length);
   if (s->mByteRange.mEnd > length) {
     // We don't have this complete sample.
     return nullptr;
   }
 
   RefPtr<MediaRawData> sample = new MediaRawData();
-  sample->mTimecode= s->mDecodeTime;
+  sample->mTimecode = TimeUnit::FromMicroseconds(s->mDecodeTime);
   sample->mTime = s->mCompositionRange.start;
   sample->mDuration = TimeUnit::FromMicroseconds(s->mCompositionRange.Length());
   sample->mOffset = s->mByteRange.mStart;
   sample->mKeyframe = s->mSync;
 
   nsAutoPtr<MediaRawDataWriter> writer(sample->CreateWriter());
   // Do the blocking read
   if (!writer->SetSize(s->mByteRange.Length())) {