Bug 1430255 - P1 - update RTPSources timebase RtpSourceObserver and PeerConnectionImpl;r?mjf draft
authorNico Grunbaum
Sun, 21 Jan 2018 14:52:48 -0800
changeset 751944 7247420884f847b6469ba286903ba42e9645a8be
parent 751476 f1a4b64f19b0e93c49492735db30a5023e624ae7
child 751945 98d5cb88c30e33b83491af1a75ee6ebd59f27841
push id98098
push userna-g@nostrum.com
push dateWed, 07 Feb 2018 07:02:32 +0000
reviewersmjf
bugs1430255
milestone60.0a1
Bug 1430255 - P1 - update RTPSources timebase RtpSourceObserver and PeerConnectionImpl;r?mjf MozReview-Commit-ID: 6KwRx0L7iYm
media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
media/webrtc/signaling/src/media-conduit/AudioConduit.h
media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
media/webrtc/signaling/src/media-conduit/RtpSourceObserver.cpp
media/webrtc/signaling/src/media-conduit/RtpSourceObserver.h
media/webrtc/signaling/src/media-conduit/moz.build
media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.h
media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.cpp
media/webrtc/signaling/src/peerconnection/TransceiverImpl.cpp
media/webrtc/signaling/src/peerconnection/TransceiverImpl.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/include/rtp_packet_observer.h
media/webrtc/trunk/webrtc/voice_engine/channel.cc
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -15,16 +15,18 @@
 #include "nsCOMPtr.h"
 #include "mozilla/Services.h"
 #include "nsServiceManagerUtils.h"
 #include "nsIPrefService.h"
 #include "nsIPrefBranch.h"
 #include "nsThreadUtils.h"
 #include "Latency.h"
 #include "mozilla/Telemetry.h"
+#include "mozilla/TimeStamp.h"
+#include <cmath>
 
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
 #include "webrtc/voice_engine/include/voe_errors.h"
 #include "webrtc/voice_engine/voice_engine_impl.h"
 #include "webrtc/system_wrappers/include/clock.h"
 
 #ifdef MOZ_WIDGET_ANDROID
@@ -40,22 +42,24 @@ static const char* acLogTag ="WebrtcAudi
 #define LOGTAG acLogTag
 
 // 32 bytes is what WebRTC CodecInst expects
 const unsigned int WebrtcAudioConduit::CODEC_PLNAME_SIZE = 32;
 
 /**
  * Factory Method for AudioConduit
  */
-RefPtr<AudioSessionConduit> AudioSessionConduit::Create()
+RefPtr<AudioSessionConduit>
+AudioSessionConduit::Create(const TimeStamp& aTimeBase)
 {
   CSFLogDebug(LOGTAG,  "%s ", __FUNCTION__);
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
 
   WebrtcAudioConduit* obj = new WebrtcAudioConduit();
+  obj->SetTimeBase(aTimeBase);
   if(obj->Init() != kMediaConduitNoError)
   {
     CSFLogError(LOGTAG,  "%s AudioConduit Init Failed ", __FUNCTION__);
     delete obj;
     return nullptr;
   }
   CSFLogDebug(LOGTAG,  "%s Successfully created AudioConduit ", __FUNCTION__);
   return obj;
@@ -275,46 +279,47 @@ bool WebrtcAudioConduit::InsertDTMFTone(
   if (outOfBand){
     result = mChannelProxy->SendTelephoneEventOutband(eventCode, lengthMs);
   }
   return result != -1;
 }
 
 void
 WebrtcAudioConduit::OnRtpPacket(const webrtc::WebRtcRTPHeader* aHeader,
-                                const int64_t aTimestamp,
                                 const uint32_t aJitter) {
-  mRtpSourceObserver.OnRtpPacket(aHeader, aTimestamp, aJitter);
+  DOMHighResTimeStamp now = (TimeStamp::Now() - mTimeBase).ToMilliseconds();
+  mRtpSourceObserver.OnRtpPacket(aHeader, std::floor(now), aJitter);
 }
 
 void
 WebrtcAudioConduit::GetRtpSources(const int64_t aTimeNow,
                                   nsTArray<dom::RTCRtpSourceEntry>& outSources)
 {
   return mRtpSourceObserver.GetRtpSources(aTimeNow, outSources);
 }
 
 // test-only: inserts a CSRC entry in a RtpSourceObserver's history for
 // getContributingSources mochitests
 void InsertAudioLevelForContributingSource(RtpSourceObserver& observer,
                                            uint32_t aCsrcSource,
-                                           int64_t aTimestamp,
+                                           DOMHighResTimeStamp aTimestamp,
                                            bool aHasAudioLevel,
                                            uint8_t aAudioLevel)
 {
   using EntryType = dom::RTCRtpSourceEntryType;
   auto key = RtpSourceObserver::GetKey(aCsrcSource, EntryType::Contributing);
   auto& hist = observer.mRtpSources[key];
   hist.Insert(aTimestamp, aTimestamp, aHasAudioLevel, aAudioLevel);
 }
 
 
 void
 WebrtcAudioConduit::InsertAudioLevelForContributingSource(uint32_t aCsrcSource,
-                                                          int64_t aTimestamp,
+                                                          DOMHighResTimeStamp
+                                                              aTimestamp,
                                                           bool aHasAudioLevel,
                                                           uint8_t aAudioLevel)
 {
   mozilla::InsertAudioLevelForContributingSource(mRtpSourceObserver,
                                                  aCsrcSource,
                                                  aTimestamp,
                                                  aHasAudioLevel,
                                                  aAudioLevel);
@@ -386,16 +391,22 @@ MediaConduitErrorCode WebrtcAudioConduit
     return kMediaConduitSessionNotInited;
   }
 
   if(!(mPtrVoEVideoSync = VoEVideoSync::GetInterface(mVoiceEngine)))
   {
     CSFLogError(LOGTAG, "%s Unable to initialize VoEVideoSync", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
+  if (mTimeBase.IsNull()) {
+    CSFLogError(LOGTAG,
+                "%s Unable to utilize RtpSourceObserver without a time base",
+                __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
   if (!(mPtrRTP = webrtc::VoERTP_RTCP::GetInterface(mVoiceEngine)))
   {
     CSFLogError(LOGTAG, "%s Unable to get audio RTP/RTCP interface ",
                 __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
   if( (mChannel = mPtrVoEBase->CreateChannel()) == -1)
@@ -404,16 +415,16 @@ MediaConduitErrorCode WebrtcAudioConduit
     return kMediaConduitChannelError;
   }
   // Needed to access TelephoneEvent APIs in 57 if we're not using Call/audio_send_stream/etc
   webrtc::VoiceEngineImpl* s = static_cast<webrtc::VoiceEngineImpl*>(mVoiceEngine);
   mChannelProxy = s->GetChannelProxy(mChannel);
   MOZ_ASSERT(mChannelProxy);
   mChannelProxy->SetRtpPacketObserver(this);
 
   CSFLogDebug(LOGTAG, "%s Channel Created %d ",__FUNCTION__, mChannel);
 
   if(mPtrVoENetwork->RegisterExternalTransport(mChannel, *this) == -1)
   {
     CSFLogError(LOGTAG, "%s VoiceEngine, External Transport Failed",__FUNCTION__);
     return kMediaConduitTransportRegistrationFail;
   }
 
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -13,16 +13,17 @@
 
 #include "MediaConduitInterface.h"
 #include "MediaEngineWrapper.h"
 #include "RtpSourceObserver.h"
 
 // Audio Engine Includes
 #include "webrtc/common_types.h"
 #include "webrtc/modules/audio_device/include/fake_audio_device.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_packet_observer.h"
 #include "webrtc/voice_engine/include/voe_base.h"
 #include "webrtc/voice_engine/include/voe_volume_control.h"
 #include "webrtc/voice_engine/include/voe_codec.h"
 #include "webrtc/voice_engine/include/voe_file.h"
 #include "webrtc/voice_engine/include/voe_network.h"
 #include "webrtc/voice_engine/include/voe_external_media.h"
 #include "webrtc/voice_engine/include/voe_audio_processing.h"
 #include "webrtc/voice_engine/include/voe_video_sync.h"
@@ -169,16 +170,19 @@ public:
    * AudioConduit registers itself as ExternalTransport to the VoiceEngine
    */
   virtual bool SendRtcp(const uint8_t *data,
                         size_t len) override;
 
   virtual uint64_t CodecPluginID() override { return 0; }
   virtual void SetPCHandle(const std::string& aPCHandle) override {}
 
+  void
+  SetTimeBase(const TimeStamp& aTimeBase) { mTimeBase = aTimeBase; }
+
   explicit WebrtcAudioConduit():
                       mVoiceEngine(nullptr),
                       mFakeAudioDevice(new webrtc::FakeAudioDeviceModule()),
                       mTransportMonitor("WebrtcAudioConduit"),
                       mTransmitterTransport(nullptr),
                       mReceiverTransport(nullptr),
                       mEngineTransmitting(false),
                       mEngineReceiving(false),
@@ -255,22 +259,21 @@ public:
 
   bool InsertDTMFTone(int channel, int eventCode, bool outOfBand,
                       int lengthMs, int attenuationDb) override;
 
   void GetRtpSources(const int64_t aTimeNow,
                      nsTArray<dom::RTCRtpSourceEntry>& outSources) override;
 
   void OnRtpPacket(const webrtc::WebRtcRTPHeader* aRtpHeader,
-                   const int64_t aTimestamp,
                    const uint32_t aJitter) override;
 
   // test-only: inserts fake CSRCs and audio level data
   void InsertAudioLevelForContributingSource(uint32_t aSource,
-                                             int64_t aTimestamp,
+                                             DOMHighResTimeStamp aTimestamp,
                                              bool aHasLevel,
                                              uint8_t aLevel);
 
   bool IsSamplingFreqSupported(int freq) const override;
 
 private:
   WebrtcAudioConduit(const WebrtcAudioConduit& other) = delete;
   void operator=(const WebrtcAudioConduit& other) = delete;
@@ -338,14 +341,15 @@ private:
 
   uint32_t mLastTimestamp;
 
   webrtc::AudioFrame mAudioFrame; // for output pulls
 
   uint32_t mSamples;
   uint32_t mLastSyncLog;
 
+  TimeStamp mTimeBase;
   RtpSourceObserver mRtpSourceObserver;
 };
 
 } // end namespace
 
 #endif
--- a/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
@@ -25,16 +25,17 @@
 #include "webrtc/common_types.h"
 #include "webrtc/api/video/video_frame_buffer.h"
 #include "webrtc/logging/rtc_event_log/rtc_event_log.h"
 
 #include <vector>
 
 namespace webrtc {
 class VideoFrame;
 }
 
 namespace mozilla {
+class TimeStamp;
 
 using RtpExtList = std::vector<webrtc::RtpExtension>;
 
 // Wrap the webrtc.org Call class adding mozilla add/ref support.
 class WebRtcCallWrapper : public RefCounted<WebRtcCallWrapper>
@@ -445,22 +446,22 @@ public:
  * information
  */
 class AudioSessionConduit : public MediaSessionConduit
 {
 public:
 
  /**
    * Factory function to create and initialize an Audio Conduit Session
-   * @param  webrtc::Call instance shared by paired audio and video
-   *         media conduits
+   * @param  aTimeBase a point in time to use as the base for time
+   *         measurements
    * @result Concrete AudioSessionConduitObject or nullptr in the case
    *         of failure
    */
-  static RefPtr<AudioSessionConduit> Create();
+  static RefPtr<AudioSessionConduit> Create(const TimeStamp& aTimeBase);
 
   virtual ~AudioSessionConduit() {}
 
   Type type() const override { return AUDIO; }
 
   void
   SetLocalRTPExtensions(bool aIsSend,
                         const RtpExtList& extensions) override {};
--- a/media/webrtc/signaling/src/media-conduit/RtpSourceObserver.cpp
+++ b/media/webrtc/signaling/src/media-conduit/RtpSourceObserver.cpp
@@ -18,18 +18,18 @@ RtpSourceObserver::RtpSourceEntry::ToLin
   return std::pow(10, -static_cast<double>(audioLevel) / 20);
 }
 
 RtpSourceObserver::RtpSourceObserver() :
   mLevelGuard("RtpSourceObserver::mLevelGuard") {}
 
 void
 RtpSourceObserver::OnRtpPacket(const webrtc::WebRtcRTPHeader* aHeader,
-                                     const int64_t aTimestamp,
-                                     const uint32_t aJitter)
+                               const DOMHighResTimeStamp aTimestamp,
+                               const uint32_t aJitter)
 {
   auto& header = aHeader->header;
   MutexAutoLock lock(mLevelGuard);
   {
     mMaxJitterWindow = std::max(mMaxJitterWindow,
                                 static_cast<int64_t>(aJitter) * 2);
     const auto jitterAdjusted = aTimestamp + aJitter;
     auto& hist = mRtpSources[GetKey(header.ssrc, EntryType::Synchronization)];
@@ -48,17 +48,17 @@ RtpSourceObserver::OnRtpPacket(const web
       bool hasLevel = i < list.numAudioLevels;
       uint8_t level = hasLevel ? list.arrOfAudioLevels[i] : 0;
       hist.Insert(aTimestamp, jitterAdjusted, hasLevel, level);
     }
   }
 }
 
 void
-RtpSourceObserver::GetRtpSources(const int64_t aTimeNow,
+RtpSourceObserver::GetRtpSources(const DOMHighResTimeStamp aTimeNow,
     nsTArray<dom::RTCRtpSourceEntry>& outSources) const
 {
   MutexAutoLock lock(mLevelGuard);
   outSources.Clear();
   for (const auto& it : mRtpSources) {
     const RtpSourceEntry* entry = it.second.FindClosestNotAfter(aTimeNow);
     if (entry) {
       dom::RTCRtpSourceEntry domEntry;
@@ -68,22 +68,19 @@ RtpSourceObserver::GetRtpSources(const i
       if (entry->hasAudioLevel) {
         domEntry.mAudioLevel.Construct(entry->ToLinearAudioLevel());
       }
       outSources.AppendElement(std::move(domEntry));
     }
   }
 }
 
-int64_t RtpSourceObserver::NowInReportClockTime() {
-  return webrtc::Clock::GetRealTimeClock()->TimeInMilliseconds();
-}
-
 const RtpSourceObserver::RtpSourceEntry*
-RtpSourceObserver::RtpSourceHistory::FindClosestNotAfter(int64_t aTime) const {
+RtpSourceObserver::RtpSourceHistory::FindClosestNotAfter(
+    DOMHighResTimeStamp aTime) const {
   // This method scans the history for the entry whose timestamp is closest to a
   // given timestamp but no greater. Because it is scanning forward, it keeps
   // track of the closest entry it has found so far in case it overshoots.
   // There is no before map.begin() which complicates things, so found tracks
   // if something was really found.
   auto lastFound = mDetailedHistory.cbegin();
   bool found = false;
   for (const auto& it : mDetailedHistory) {
@@ -101,17 +98,17 @@ RtpSourceObserver::RtpSourceHistory::Fin
   }
   if (HasEvicted() && aTime >= mLatestEviction.jitterAdjustedTimestamp) {
     return &mLatestEviction;
   }
   return nullptr;
 }
 
 void
-RtpSourceObserver::RtpSourceHistory::Prune(const int64_t aTimeNow) {
+RtpSourceObserver::RtpSourceHistory::Prune(const DOMHighResTimeStamp aTimeNow) {
   const auto aTimeT = aTimeNow - mMaxJitterWindow;
   const auto aTimePrehistory = aTimeNow - kHistoryWindow;
   bool found = false;
   // New lower bound of the map
   auto lower = mDetailedHistory.begin();
   for (auto& it : mDetailedHistory) {
     if (it.second.jitterAdjustedTimestamp > aTimeT) {
       found = true;
@@ -132,43 +129,44 @@ RtpSourceObserver::RtpSourceHistory::Pru
   }
   if (HasEvicted() &&
       (mLatestEviction.jitterAdjustedTimestamp + kHistoryWindow) < aTimeNow) {
     mHasEvictedEntry = false;
   }
 }
 
 void
-RtpSourceObserver::RtpSourceHistory::Insert(const int64_t aTimeNow,
-                                            const int64_t aTimestamp,
+RtpSourceObserver::RtpSourceHistory::Insert(const DOMHighResTimeStamp aTimeNow,
+                                            const DOMHighResTimeStamp aTimestamp,
                                             const bool aHasAudioLevel,
                                             const uint8_t aAudioLevel)
 {
   Insert(aTimeNow, aTimestamp).Update(aTimestamp, aHasAudioLevel, aAudioLevel);
 }
 
 RtpSourceObserver::RtpSourceEntry&
-RtpSourceObserver::RtpSourceHistory::Insert(const int64_t aTimeNow,
-                                            const int64_t aTimestamp)
+RtpSourceObserver::RtpSourceHistory::Insert(
+    const DOMHighResTimeStamp aTimeNow,
+    const DOMHighResTimeStamp aTimestamp)
 {
   // Time T is the oldest time inside the jitter window (now - jitter)
   // Time J is the newest time inside the jitter window (now + jitter)
   // Time x is the jitter adjusted entry time
   // Time Z is the time of the long term storage element
   // Times A, B, C are times of entries in the jitter window buffer
   // x-axis: time
   // x or x        T   J
   //  |------Z-----|ABC| -> |------Z-----|ABC|
   if ((aTimestamp + kHistoryWindow) < aTimeNow ||
       aTimestamp < mLatestEviction.jitterAdjustedTimestamp) {
     return mPrehistory; // A.K.A. /dev/null
   }
   mMaxJitterWindow = std::max(mMaxJitterWindow,
                               (aTimestamp - aTimeNow) * 2);
-  const int64_t aTimeT = aTimeNow - mMaxJitterWindow;
+  const DOMHighResTimeStamp aTimeT = aTimeNow - mMaxJitterWindow;
   //           x  T   J
   // |------Z-----|ABC| -> |--------x---|ABC|
   if (aTimestamp < aTimeT) {
     mHasEvictedEntry = true;
     return mLatestEviction;
   }
   //              T  X J
   // |------Z-----|AB-C| -> |--------x---|ABXC|
--- a/media/webrtc/signaling/src/media-conduit/RtpSourceObserver.h
+++ b/media/webrtc/signaling/src/media-conduit/RtpSourceObserver.h
@@ -1,77 +1,74 @@
 #ifndef AUDIOLEVELOBSERVER_H
 #define AUDIOLEVELOBSERVER_H
 
 #include <vector>
 #include <map>
 
+#include "nsDOMNavigationTiming.h"
 #include "mozilla/Mutex.h"
 #include "nsISupportsImpl.h"
 #include "mozilla/dom/RTCRtpSourcesBinding.h"
-#include "webrtc/modules/rtp_rtcp/include/rtp_packet_observer.h"
 
 // Unit Test class
 namespace test {
   class RtpSourcesTest;
 }
 
+namespace webrtc {
+  struct WebRtcRTPHeader;
+}
+
 namespace mozilla {
 
 /* Observes reception of RTP packets and tabulates data about the
- * most recent arival times by source (csrc or ssrc) and audio level information
+ * most recent arrival times by source (csrc or ssrc) and audio level information
  *  * csrc-audio-level RTP header extension
  *  * ssrc-audio-level RTP header extension
  */
-class RtpSourceObserver: public webrtc::RtpPacketObserver {
+class RtpSourceObserver {
 public:
 
   RtpSourceObserver();
 
   virtual ~RtpSourceObserver() {};
 
   void OnRtpPacket(const webrtc::WebRtcRTPHeader* aRtpHeader,
-                   const int64_t aTimestamp,
-                   const uint32_t aJitter) override;
-
-  /* Get the local time in MS from the same clock source that is used
-   * to generate the capture timestamps. Use for computing the age of
-   * an entry relative to another clock, e.g. the JS
-   * @return time of now in MS
-   */
-  static int64_t NowInReportClockTime();
+                   const DOMHighResTimeStamp aTimestamp,
+                   const uint32_t aJitter);
 
   /*
    * Get the most recent 10 second window of CSRC and SSRC sources.
-   * @param aTimeNow the current report clock time, @see NowInReportClockTime.
-   * @param outLevels will be popluted with source entries
+   * @param aTimeNow the current timestamp, relative to the conduit's time base
+   * @param outSources will be populated with source entries
    * Note: this takes jitter into account when calculating the window so
    * the window is actually [time - jitter - 10 sec .. time - jitter]
    */
   void
-  GetRtpSources(const int64_t aTimeNow,
+  GetRtpSources(const DOMHighResTimeStamp aTimeNow,
                 nsTArray<dom::RTCRtpSourceEntry>& outSources) const;
 
 private:
   // Note: these are pool allocated
   struct RtpSourceEntry {
     RtpSourceEntry() = default;
-    void Update(const int64_t aTimestamp,
+    void Update(const DOMHighResTimeStamp aTimestamp,
                 const bool aHasAudioLevel,
                 const uint8_t aAudioLevel) {
       jitterAdjustedTimestamp = aTimestamp;
       // Audio level range is 0 - 127 inclusive
       hasAudioLevel = aHasAudioLevel && !(aAudioLevel & 0x80);
       audioLevel = aAudioLevel;
     }
     // Sets the audio level nullable according to the linear scale
     // outlined in the webrtc-pc spec.
     double ToLinearAudioLevel() const;
     // Time this information was received + jitter
-    int64_t jitterAdjustedTimestamp = 0;
+    DOMHighResTimeStamp jitterAdjustedTimestamp = 0;
     bool hasAudioLevel = false;
     uint8_t audioLevel = 0;
   };
   /* Maintains a history of packets for reporting with getContributingSources
    * and getSynchronizationSources. It is expected that entries will not always
    * be observed in chronological order, and that the correct entry for a query
    * not be the most recently added item. Many times the query time is expected
    * to fall within [now - Jitter window .. now + Jitter Window]. A full history
@@ -93,41 +90,42 @@ private:
    *  Q4: I
    */
   class RtpSourceHistory {
   public:
     RtpSourceHistory() = default;
     // Finds the closest entry to a time, and passes that value to a closure
     // Note: the pointer is invalidated by any operation on the history
     // Note: the pointer is owned by the RtpSourceHistory
-    const RtpSourceEntry* FindClosestNotAfter(int64_t aTime) const;
+    const RtpSourceEntry* FindClosestNotAfter(DOMHighResTimeStamp aTime) const;
     // Inserts data into the history, may silently drop data if it is too old
-    void Insert(const int64_t aTimeNow,
-                const int64_t aTimestamp,
+    void Insert(const DOMHighResTimeStamp aTimeNow,
+                const DOMHighResTimeStamp aTimestamp,
                 const bool aHasAudioLevel,
                 const uint8_t aAudioLevel);
     // Removes aged out from the jitter window
-    void Prune(const int64_t aTimeNow);
+    void Prune(const DOMHighResTimeStamp aTimeNow);
     // Set Source
     void SetSource(uint32_t aSource, dom::RTCRtpSourceEntryType aType);
   private:
     // Finds a place to insert data and returns a reference to it
     RtpSourceObserver::RtpSourceEntry&
-    Insert(const int64_t aTimeNow, const int64_t aTimestamp);
+    Insert(const DOMHighResTimeStamp aTimeNow,
+           const DOMHighResTimeStamp aTimestamp);
     // Is the history buffer empty?
     bool Empty() const { return !mDetailedHistory.size(); }
     // Is there an evicted entry
     bool HasEvicted() const { return mHasEvictedEntry; }
 
     // Minimum amount of time (ms) to store a complete packet history
-    constexpr static int64_t kMinJitterWindow = 1000;
+    constexpr static DOMHighResTimeStamp kMinJitterWindow = 1000;
     // Size of the history window (ms)
-    constexpr static int64_t kHistoryWindow = 10000;
+    constexpr static DOMHighResTimeStamp kHistoryWindow = 10000;
     // This is 2 x the maximum observed jitter or the min which ever is higher
-    int64_t mMaxJitterWindow = kMinJitterWindow;
+    DOMHighResTimeStamp mMaxJitterWindow = kMinJitterWindow;
     // The least old entry to be kicked from the buffer.
     RtpSourceEntry mLatestEviction;
     // Is there an evicted entry?
     bool mHasEvictedEntry = false;
     std::map<int64_t, RtpSourceEntry> mDetailedHistory;
     // Entry before history
     RtpSourceEntry mPrehistory;
     // Unit test
@@ -164,15 +162,15 @@ private:
   // Unit test
   friend test::RtpSourcesTest;
 
   // Testing only
   // Inserts additional csrc audio levels for mochitests
   friend void InsertAudioLevelForContributingSource(
       RtpSourceObserver& observer,
       uint32_t aCsrcSource,
-      int64_t aTimestamp,
+      DOMHighResTimeStamp aTimestamp,
       bool aHasAudioLevel,
       uint8_t aAudioLevel);
 };
 }
 #undef NG
-#endif // AUDIOLEVELOBSERVER_H
\ No newline at end of file
+#endif // AUDIOLEVELOBSERVER_H
--- a/media/webrtc/signaling/src/media-conduit/moz.build
+++ b/media/webrtc/signaling/src/media-conduit/moz.build
@@ -2,16 +2,17 @@
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 include('../../../webrtc.mozbuild')
 
 LOCAL_INCLUDES += [
     '!/ipc/ipdl/_ipdlheaders',
+    '/dom/base',
     '/ipc/chromium/src',
     '/media/libyuv/libyuv/include',
     '/media/mtransport',
     '/media/webrtc',
     '/media/webrtc/signaling/src/common',
     '/media/webrtc/signaling/src/common/browser_logging',
     '/media/webrtc/signaling/src/common/time_profiling',
     '/media/webrtc/signaling/src/peerconnection',
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
@@ -360,16 +360,17 @@ PeerConnectionImpl::PeerConnectionImpl(c
   mAllowIceLoopback = Preferences::GetBool(
     "media.peerconnection.ice.loopback", false);
   mAllowIceLinkLocal = Preferences::GetBool(
     "media.peerconnection.ice.link_local", false);
   mForceIceTcp = Preferences::GetBool(
     "media.peerconnection.ice.force_ice_tcp", false);
   memset(mMaxReceiving, 0, sizeof(mMaxReceiving));
   memset(mMaxSending, 0, sizeof(mMaxSending));
+  mTimeBase = TimeStamp::ProcessCreation();
 }
 
 PeerConnectionImpl::~PeerConnectionImpl()
 {
   if (mTimeCard) {
     STAMP_TIMECARD(mTimeCard, "Destructor Invoked");
     print_timecard(mTimeCard);
     destroy_timecard(mTimeCard);
@@ -2228,17 +2229,17 @@ PeerConnectionImpl::GetRtpSources(
     }
   }
   return NS_OK;
 }
 
 DOMHighResTimeStamp
 PeerConnectionImpl::GetNowInRtpSourceReferenceTime()
 {
-  return RtpSourceObserver::NowInReportClockTime();
+  return std::floor((TimeStamp::Now() - mTimeBase).ToMilliseconds());
 }
 
 // test-only: adds fake CSRCs and audio data
 nsresult
 PeerConnectionImpl::InsertAudioLevelForContributingSource(
     dom::MediaStreamTrack& aRecvTrack,
     unsigned long aSource,
     DOMHighResTimeStamp aTimestamp,
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.h
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.h
@@ -656,16 +656,18 @@ public:
   void OnMediaError(const std::string& aError);
 
   bool ShouldDumpPacket(size_t level, dom::mozPacketDumpType type,
                         bool sending) const;
 
   void DumpPacket_m(size_t level, dom::mozPacketDumpType type, bool sending,
                     UniquePtr<uint8_t[]>& packet, size_t size);
 
+  const mozilla::TimeStamp& GetTimeBase() { return mTimeBase; }
+
 private:
   virtual ~PeerConnectionImpl();
   PeerConnectionImpl(const PeerConnectionImpl&rhs);
   PeerConnectionImpl& operator=(PeerConnectionImpl);
   nsresult CalculateFingerprint(const std::string& algorithm,
                                 std::vector<uint8_t>* fingerprint) const;
   nsresult ConfigureJsepSessionCodecs();
 
@@ -803,16 +805,18 @@ private:
   std::string mPreviousIcePwd; // used during rollback of ice restart
   unsigned long mIceRestartCount;
   unsigned long mIceRollbackCount;
 
   // Start time of ICE, used for telemetry
   mozilla::TimeStamp mIceStartTime;
   // Start time of call used for Telemetry
   mozilla::TimeStamp mStartTime;
+  // Time base for statistics
+  mozilla::TimeStamp mTimeBase;
 
   bool mHaveConfiguredCodecs;
 
   bool mHaveDataStream;
 
   unsigned int mAddCandidateErrorCount;
 
   bool mTrickle;
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.cpp
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.cpp
@@ -1139,17 +1139,18 @@ PeerConnectionMedia::AddTransceiver(
 
   RefPtr<TransceiverImpl> transceiver = new TransceiverImpl(
       mParent->GetHandle(),
       aJsepTransceiver,
       mMainThread.get(),
       mSTSThread.get(),
       &aReceiveTrack,
       aSendTrack,
-      mCall.get());
+      mCall.get(),
+      mParent->GetTimeBase());
 
   if (!transceiver->IsValid()) {
     return NS_ERROR_FAILURE;
   }
 
   if (aSendTrack) {
     // implement checking for peerIdentity (where failure == black/silence)
     nsIDocument* doc = mParent->GetWindow()->GetExtantDoc();
--- a/media/webrtc/signaling/src/peerconnection/TransceiverImpl.cpp
+++ b/media/webrtc/signaling/src/peerconnection/TransceiverImpl.cpp
@@ -34,21 +34,23 @@ MOZ_MTLOG_MODULE("transceiverimpl")
 
 TransceiverImpl::TransceiverImpl(
     const std::string& aPCHandle,
     JsepTransceiver* aJsepTransceiver,
     nsIEventTarget* aMainThread,
     nsIEventTarget* aStsThread,
     dom::MediaStreamTrack* aReceiveTrack,
     dom::MediaStreamTrack* aSendTrack,
-    WebRtcCallWrapper* aCallWrapper) :
+    WebRtcCallWrapper* aCallWrapper,
+    const TimeStamp& aTimeBase) :
   mPCHandle(aPCHandle),
   mJsepTransceiver(aJsepTransceiver),
   mHaveStartedReceiving(false),
   mHaveSetupTransport(false),
+  mTimeBase(aTimeBase),
   mMainThread(aMainThread),
   mStsThread(aStsThread),
   mReceiveTrack(aReceiveTrack),
   mSendTrack(aSendTrack),
   mCallWrapper(aCallWrapper)
 {
   if (IsVideo()) {
     InitVideo();
@@ -73,17 +75,17 @@ TransceiverImpl::TransceiverImpl(
 
 TransceiverImpl::~TransceiverImpl() = default;
 
 NS_IMPL_ISUPPORTS0(TransceiverImpl)
 
 void
 TransceiverImpl::InitAudio()
 {
-  mConduit = AudioSessionConduit::Create();
+  mConduit = AudioSessionConduit::Create(mTimeBase);
 
   if (!mConduit) {
     MOZ_MTLOG(ML_ERROR, mPCHandle << "[" << mMid << "]: " << __FUNCTION__ <<
                         ": Failed to create AudioSessionConduit");
     // TODO(bug 1422897): We need a way to record this when it happens in the
     // wild.
     return;
   }
@@ -1037,30 +1039,31 @@ TransceiverImpl::Stop()
 }
 
 bool
 TransceiverImpl::IsVideo() const
 {
   return mJsepTransceiver->GetMediaType() == SdpMediaSection::MediaType::kVideo;
 }
 
-void TransceiverImpl::GetRtpSources(const int64_t aTimeNow,
+void TransceiverImpl::GetRtpSources(const DOMHighResTimeStamp aTimeNow,
     nsTArray<dom::RTCRtpSourceEntry>& outSources) const
 {
   if (IsVideo()) {
     return;
   }
   WebrtcAudioConduit *audio_conduit =
     static_cast<WebrtcAudioConduit*>(mConduit.get());
   audio_conduit->GetRtpSources(aTimeNow, outSources);
 }
 
 
 void TransceiverImpl::InsertAudioLevelForContributingSource(uint32_t aSource,
-                                                            int64_t aTimestamp,
+                                                            DOMHighResTimeStamp
+                                                              aTimestamp,
                                                             bool aHasLevel,
                                                             uint8_t aLevel)
 {
   if (IsVideo()) {
     return;
   }
   WebrtcAudioConduit *audio_conduit =
     static_cast<WebrtcAudioConduit*>(mConduit.get());
--- a/media/webrtc/signaling/src/peerconnection/TransceiverImpl.h
+++ b/media/webrtc/signaling/src/peerconnection/TransceiverImpl.h
@@ -51,17 +51,18 @@ public:
    * set.
    */
   TransceiverImpl(const std::string& aPCHandle,
                   JsepTransceiver* aJsepTransceiver,
                   nsIEventTarget* aMainThread,
                   nsIEventTarget* aStsThread,
                   dom::MediaStreamTrack* aReceiveTrack,
                   dom::MediaStreamTrack* aSendTrack,
-                  WebRtcCallWrapper* aCallWrapper);
+                  WebRtcCallWrapper* aCallWrapper,
+                  const TimeStamp& aTimeBase);
 
   bool IsValid() const
   {
     return !!mConduit;
   }
 
   nsresult UpdateSendTrack(dom::MediaStreamTrack* aSendTrack);
 
@@ -109,22 +110,22 @@ public:
   RefPtr<MediaPipeline> GetReceivePipeline();
 
   void AddRIDExtension(unsigned short aExtensionId);
 
   void AddRIDFilter(const nsAString& aRid);
 
   bool IsVideo() const;
 
-  void GetRtpSources(const int64_t aTimeNow,
+  void GetRtpSources(const DOMHighResTimeStamp aTimeNow,
                      nsTArray<dom::RTCRtpSourceEntry>& outSources) const;
 
   // test-only: insert fake CSRCs and audio levels for testing
   void InsertAudioLevelForContributingSource(uint32_t aSource,
-                                             int64_t aTimestamp,
+                                             DOMHighResTimeStamp aTimestamp,
                                              bool aHasLevel,
                                              uint8_t aLevel);
 
   NS_DECL_THREADSAFE_ISUPPORTS
 
 private:
   virtual ~TransceiverImpl();
   void InitAudio();
@@ -137,16 +138,17 @@ private:
                          bool aSending);
   void Stop();
 
   const std::string mPCHandle;
   RefPtr<JsepTransceiver> mJsepTransceiver;
   std::string mMid;
   bool mHaveStartedReceiving;
   bool mHaveSetupTransport;
+  const TimeStamp mTimeBase;
   nsCOMPtr<nsIEventTarget> mMainThread;
   nsCOMPtr<nsIEventTarget> mStsThread;
   RefPtr<dom::MediaStreamTrack> mReceiveTrack;
   RefPtr<dom::MediaStreamTrack> mSendTrack;
   // state for webrtc.org that is shared between all transceivers
   RefPtr<WebRtcCallWrapper> mCallWrapper;
   RefPtr<TransportFlow> mRtpFlow;
   RefPtr<TransportFlow> mRtcpFlow;
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/include/rtp_packet_observer.h
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/include/rtp_packet_observer.h
@@ -5,15 +5,14 @@ namespace webrtc {
 
 struct WebRtcRTPHeader;
 
 class RtpPacketObserver {
   public:
 
   virtual void
   OnRtpPacket(const WebRtcRTPHeader* aRtpHeader,
-              const int64_t aTimestamp,
               const uint32_t aJitter) = 0;
 };
 
 }
 
 #endif // RTP_AUDIO_LEVEL_OBSERVER_H
--- a/media/webrtc/trunk/webrtc/voice_engine/channel.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/channel.cc
@@ -790,18 +790,17 @@ int32_t Channel::OnReceivedPayloadData(c
   // getSynchronizationSources.
   if (rtp_source_observer_) {
     const auto playoutFrequency = audio_coding_->PlayoutFrequency();
     uint32_t jitter = 0;
     if (playoutFrequency > 0) {
       const ChannelStatistics stats = statistics_proxy_->GetStats();
       jitter = stats.rtcp.jitter / (playoutFrequency / 1000);
     }
-    rtp_source_observer_->OnRtpPacket(rtpHeader,
-        webrtc::Clock::GetRealTimeClock()->TimeInMilliseconds(), jitter);
+    rtp_source_observer_->OnRtpPacket(rtpHeader, jitter);
   }
   int64_t round_trip_time = 0;
   _rtpRtcpModule->RTT(rtp_receiver_->SSRC(), &round_trip_time, NULL, NULL,
                       NULL);
 
   std::vector<uint16_t> nack_list = audio_coding_->GetNackList(round_trip_time);
   if (!nack_list.empty()) {
     // Can't use nack_list.data() since it's not supported by all