Bug 1404997 - P1. clang-format MediaPipeline.{cpp,h}. r?pehrsons
author Jean-Yves Avenard <jyavenard@mozilla.com>
Thu, 30 Nov 2017 13:47:27 +0100
changeset 712473 613be1b740a952f0ab7f36e8c0fcfa622468af1d
parent 712435 4780efa5f11052cfe775e1d1e76d85cfc0bfc1ea
child 712474 e3aa301c1a573c44566a6120a2984db366db81f2
push id 93348
push user bmo:jyavenard@mozilla.com
push date Sat, 16 Dec 2017 21:31:13 +0000
reviewers pehrsons
bugs 1404997
milestone 59.0a1
Bug 1404997 - P1. clang-format MediaPipeline.{cpp,h}. r?pehrsons
It's bound to be done automatically, and it makes the code easier to modify later.
MozReview-Commit-ID: IQ5TBtS8Z3v
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -56,34 +56,35 @@
 #include "webrtc/base/bind.h"
 
 #include "nsThreadUtils.h"
 
 #include "CSFLog.h"
 
 // Max size given stereo is 480*2*2 = 1920 (10ms of 16-bits stereo audio at
 // 48KHz)
-#define AUDIO_SAMPLE_BUFFER_MAX_BYTES 480*2*2
-static_assert((WEBRTC_MAX_SAMPLE_RATE/100)*sizeof(uint16_t) * 2
+#define AUDIO_SAMPLE_BUFFER_MAX_BYTES (480 * 2 * 2)
+static_assert((WEBRTC_MAX_SAMPLE_RATE / 100) * sizeof(uint16_t) * 2
                <= AUDIO_SAMPLE_BUFFER_MAX_BYTES,
                "AUDIO_SAMPLE_BUFFER_MAX_BYTES is not large enough");
 
 using namespace mozilla;
 using namespace mozilla::dom;
 using namespace mozilla::gfx;
 using namespace mozilla::layers;
 
 static const char* mpLogTag = "MediaPipeline";
 #ifdef LOGTAG
 #undef LOGTAG
 #endif
 #define LOGTAG mpLogTag
 
 namespace mozilla {
-extern mozilla::LogModule* AudioLogModule();
+extern mozilla::LogModule*
+AudioLogModule();
 
 class VideoConverterListener
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoConverterListener)
 
   virtual void OnVideoFrameConverted(unsigned char* aVideoFrame,
                                      unsigned int aVideoFrameLength,
@@ -94,19 +95,19 @@ public:
 
   virtual void OnVideoFrameConverted(webrtc::VideoFrame& aVideoFrame) = 0;
 
 protected:
   virtual ~VideoConverterListener() {}
 };
 
 // I420 buffer size macros
-#define YSIZE(x,y) (CheckedInt<int>(x)*(y))
-#define CRSIZE(x,y) ((((x)+1) >> 1) * (((y)+1) >> 1))
-#define I420SIZE(x,y) (YSIZE((x),(y)) + 2 * CRSIZE((x),(y)))
+#define YSIZE(x, y) (CheckedInt<int>(x) * (y))
+#define CRSIZE(x, y) ((((x) + 1) >> 1) * (((y) + 1) >> 1))
+#define I420SIZE(x, y) (YSIZE((x), (y)) + 2 * CRSIZE((x), (y)))
 
 // An async video frame format converter.
 //
 // Input is typically a MediaStream(Track)Listener driven by MediaStreamGraph.
 //
 // We keep track of the size of the TaskQueue so we can drop frames if
 // conversion is taking too long.
 //
@@ -147,34 +148,44 @@ public:
     }
     last_img_ = serial;
 
     // A throttling limit of 1 allows us to convert 2 frames concurrently.
     // It's short enough to not build up too significant a delay, while
     // giving us a margin to not cause some machines to drop every other frame.
     const int32_t queueThrottlingLimit = 1;
     if (mLength > queueThrottlingLimit) {
-      CSFLogDebug(LOGTAG, "VideoFrameConverter %p queue is full. Throttling by throwing away a frame.",
+      CSFLogDebug(LOGTAG,
+                  "VideoFrameConverter %p queue is full. Throttling by "
+                  "throwing away a frame.",
                   this);
 #ifdef DEBUG
       ++mThrottleCount;
       mThrottleRecord = std::max(mThrottleCount, mThrottleRecord);
 #endif
       return;
     }
 
 #ifdef DEBUG
     if (mThrottleCount > 0) {
       if (mThrottleCount > 5) {
         // Log at a higher level when we have large drops.
-        CSFLogInfo(LOGTAG, "VideoFrameConverter %p stopped throttling after throwing away %d frames. Longest throttle so far was %d frames.",
-                   this, mThrottleCount, mThrottleRecord);
+        CSFLogInfo(LOGTAG,
+                   "VideoFrameConverter %p stopped throttling after throwing "
+                   "away %d frames. Longest throttle so far was %d frames.",
+                   this,
+                   mThrottleCount,
+                   mThrottleRecord);
       } else {
-        CSFLogDebug(LOGTAG, "VideoFrameConverter %p stopped throttling after throwing away %d frames. Longest throttle so far was %d frames.",
-                    this, mThrottleCount, mThrottleRecord);
+        CSFLogDebug(LOGTAG,
+                    "VideoFrameConverter %p stopped throttling after throwing "
+                    "away %d frames. Longest throttle so far was %d frames.",
+                    this,
+                    mThrottleCount,
+                    mThrottleRecord);
       }
       mThrottleCount = 0;
     }
 #endif
 
     bool forceBlack = aForceBlack || aChunk.mFrame.GetForceBlack();
 
     if (forceBlack) {
@@ -200,18 +211,20 @@ public:
       disabled_frame_sent_ = TimeStamp();
     }
 
     ++mLength; // Atomic
 
     nsCOMPtr<nsIRunnable> runnable =
       NewRunnableMethod<StoreRefPtrPassByPtr<Image>, bool>(
         "VideoFrameConverter::ProcessVideoFrame",
-        this, &VideoFrameConverter::ProcessVideoFrame,
-        aChunk.mFrame.GetImage(), forceBlack);
+        this,
+        &VideoFrameConverter::ProcessVideoFrame,
+        aChunk.mFrame.GetImage(),
+        forceBlack);
     nsresult rv = mTaskQueue->Dispatch(runnable.forget());
     MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
   }
 
   void AddListener(VideoConverterListener* aListener)
   {
     MutexAutoLock lock(mMutex);
 
@@ -228,28 +241,22 @@ public:
 
   void Shutdown()
   {
     mTaskQueue->BeginShutdown();
     mTaskQueue->AwaitShutdownAndIdle();
   }
 
 protected:
-  virtual ~VideoFrameConverter()
-  {
-    MOZ_COUNT_DTOR(VideoFrameConverter);
-  }
+  virtual ~VideoFrameConverter() { MOZ_COUNT_DTOR(VideoFrameConverter); }
 
-  static void DeleteBuffer(uint8 *data)
-  {
-    delete[] data;
-  }
+  static void DeleteBuffer(uint8* data) { delete[] data; }
 
-  // This takes ownership of the buffer and attached it to the VideoFrame we send
-  // to the listeners
+  // This takes ownership of the buffer and attaches it to the VideoFrame we
+  // send to the listeners
   void VideoFrameConverted(UniquePtr<uint8[]> aBuffer,
                            unsigned int aVideoFrameLength,
                            unsigned short aWidth,
                            unsigned short aHeight,
                            VideoType aVideoType,
                            uint64_t aCaptureTime)
   {
     // check for parameter sanity
@@ -263,24 +270,30 @@ protected:
     const int stride_y = aWidth;
     const int stride_uv = (aWidth + 1) / 2;
 
     const uint8_t* buffer_y = aBuffer.get();
     const uint8_t* buffer_u = buffer_y + stride_y * aHeight;
     const uint8_t* buffer_v = buffer_u + stride_uv * ((aHeight + 1) / 2);
     rtc::scoped_refptr<webrtc::WrappedI420Buffer> video_frame_buffer(
       new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
-        aWidth, aHeight,
-        buffer_y, stride_y,
-        buffer_u, stride_uv,
-        buffer_v, stride_uv,
+        aWidth,
+        aHeight,
+        buffer_y,
+        stride_y,
+        buffer_u,
+        stride_uv,
+        buffer_v,
+        stride_uv,
         rtc::Bind(&DeleteBuffer, aBuffer.release())));
 
-    webrtc::VideoFrame video_frame(video_frame_buffer, aCaptureTime,
-                                   aCaptureTime, webrtc::kVideoRotation_0); // XXX
+    webrtc::VideoFrame video_frame(video_frame_buffer,
+                                   aCaptureTime,
+                                   aCaptureTime,
+                                   webrtc::kVideoRotation_0); // XXX
     VideoFrameConverted(video_frame);
   }
 
   void VideoFrameConverted(webrtc::VideoFrame& aVideoFrame)
   {
     MutexAutoLock lock(mMutex);
 
     for (RefPtr<VideoConverterListener>& listener : mListeners) {
@@ -308,69 +321,81 @@ protected:
       auto pixelData = MakeUniqueFallible<uint8_t[]>(length.value());
       if (pixelData) {
         // YCrCb black = 0x10 0x80 0x80
         memset(pixelData.get(), 0x10, yPlaneLen.value());
         // Fill Cb/Cr planes
         memset(pixelData.get() + yPlaneLen.value(), 0x80, cbcrPlaneLen);
 
         CSFLogDebug(LOGTAG, "Sending a black video frame");
-        VideoFrameConverted(Move(pixelData), length.value(),
-                            size.width, size.height,
-                            mozilla::kVideoI420, 0);
+        VideoFrameConverted(Move(pixelData),
+                            length.value(),
+                            size.width,
+                            size.height,
+                            mozilla::kVideoI420,
+                            0);
       }
       return;
     }
 
     ImageFormat format = aImage->GetFormat();
     if (format == ImageFormat::PLANAR_YCBCR) {
       // Cast away constness b/c some of the accessors are non-const
-      PlanarYCbCrImage* yuv = const_cast<PlanarYCbCrImage *>(
-          static_cast<const PlanarYCbCrImage *>(aImage));
+      PlanarYCbCrImage* yuv = const_cast<PlanarYCbCrImage*>(
+        static_cast<const PlanarYCbCrImage*>(aImage));
 
-      const PlanarYCbCrData *data = yuv->GetData();
+      const PlanarYCbCrData* data = yuv->GetData();
       if (data) {
-        uint8_t *y = data->mYChannel;
-        uint8_t *cb = data->mCbChannel;
-        uint8_t *cr = data->mCrChannel;
+        uint8_t* y = data->mYChannel;
+        uint8_t* cb = data->mCbChannel;
+        uint8_t* cr = data->mCrChannel;
         int32_t yStride = data->mYStride;
         int32_t cbCrStride = data->mCbCrStride;
         uint32_t width = yuv->GetSize().width;
         uint32_t height = yuv->GetSize().height;
 
         rtc::Callback0<void> callback_unused;
         rtc::scoped_refptr<webrtc::WrappedI420Buffer> video_frame_buffer(
           new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
-            width, height,
-            y, yStride,
-            cb, cbCrStride,
-            cr, cbCrStride,
+            width,
+            height,
+            y,
+            yStride,
+            cb,
+            cbCrStride,
+            cr,
+            cbCrStride,
             callback_unused));
 
         webrtc::VideoFrame i420_frame(video_frame_buffer,
-                                      0, 0, // not setting timestamps
+                                      0,
+                                      0, // not setting timestamps
                                       webrtc::kVideoRotation_0);
         CSFLogDebug(LOGTAG, "Sending an I420 video frame");
         VideoFrameConverted(i420_frame);
         return;
       }
     }
 
     RefPtr<SourceSurface> surf = aImage->GetAsSourceSurface();
     if (!surf) {
-      CSFLogError(LOGTAG, "Getting surface from %s image failed",
+      CSFLogError(LOGTAG,
+                  "Getting surface from %s image failed",
                   Stringify(format).c_str());
       return;
     }
 
     RefPtr<DataSourceSurface> data = surf->GetDataSurface();
     if (!data) {
-      CSFLogError(LOGTAG, "Getting data surface from %s image with %s (%s) surface failed",
-                  Stringify(format).c_str(), Stringify(surf->GetType()).c_str(),
-                  Stringify(surf->GetFormat()).c_str());
+      CSFLogError(
+        LOGTAG,
+        "Getting data surface from %s image with %s (%s) surface failed",
+        Stringify(format).c_str(),
+        Stringify(surf->GetType()).c_str(),
+        Stringify(surf->GetFormat()).c_str());
       return;
     }
 
     IntSize size = aImage->GetSize();
     // these don't need to be CheckedInt, any overflow will be caught by YSIZE
     int half_width = (size.width + 1) >> 1;
     int half_height = (size.height + 1) >> 1;
     int c_size = half_width * half_height;
@@ -383,64 +408,83 @@ protected:
     auto yuv_scoped = MakeUniqueFallible<uint8[]>(buffer_size.value());
     if (!yuv_scoped) {
       return;
     }
     uint8* yuv = yuv_scoped.get();
 
     DataSourceSurface::ScopedMap map(data, DataSourceSurface::READ);
     if (!map.IsMapped()) {
-      CSFLogError(LOGTAG, "Reading DataSourceSurface from %s image with %s (%s) surface failed",
-                  Stringify(format).c_str(), Stringify(surf->GetType()).c_str(),
-                  Stringify(surf->GetFormat()).c_str());
+      CSFLogError(
+        LOGTAG,
+        "Reading DataSourceSurface from %s image with %s (%s) surface failed",
+        Stringify(format).c_str(),
+        Stringify(surf->GetType()).c_str(),
+        Stringify(surf->GetFormat()).c_str());
       return;
     }
 
     int rv;
     int cb_offset = YSIZE(size.width, size.height).value();
     int cr_offset = cb_offset + c_size;
     switch (surf->GetFormat()) {
       case SurfaceFormat::B8G8R8A8:
       case SurfaceFormat::B8G8R8X8:
         rv = libyuv::ARGBToI420(static_cast<uint8*>(map.GetData()),
                                 map.GetStride(),
-                                yuv, size.width,
-                                yuv + cb_offset, half_width,
-                                yuv + cr_offset, half_width,
-                                size.width, size.height);
+                                yuv,
+                                size.width,
+                                yuv + cb_offset,
+                                half_width,
+                                yuv + cr_offset,
+                                half_width,
+                                size.width,
+                                size.height);
         break;
       case SurfaceFormat::R5G6B5_UINT16:
         rv = libyuv::RGB565ToI420(static_cast<uint8*>(map.GetData()),
                                   map.GetStride(),
-                                  yuv, size.width,
-                                  yuv + cb_offset, half_width,
-                                  yuv + cr_offset, half_width,
-                                  size.width, size.height);
+                                  yuv,
+                                  size.width,
+                                  yuv + cb_offset,
+                                  half_width,
+                                  yuv + cr_offset,
+                                  half_width,
+                                  size.width,
+                                  size.height);
         break;
       default:
-        CSFLogError(LOGTAG, "Unsupported RGB video format %s",
+        CSFLogError(LOGTAG,
+                    "Unsupported RGB video format %s",
                     Stringify(surf->GetFormat()).c_str());
         MOZ_ASSERT(PR_FALSE);
         return;
     }
     if (rv != 0) {
-      CSFLogError(LOGTAG, "%s to I420 conversion failed",
+      CSFLogError(LOGTAG,
+                  "%s to I420 conversion failed",
                   Stringify(surf->GetFormat()).c_str());
       return;
     }
-    CSFLogDebug(LOGTAG, "Sending an I420 video frame converted from %s",
+    CSFLogDebug(LOGTAG,
+                "Sending an I420 video frame converted from %s",
                 Stringify(surf->GetFormat()).c_str());
-    VideoFrameConverted(Move(yuv_scoped), buffer_size.value(), size.width, size.height, mozilla::kVideoI420, 0);
+    VideoFrameConverted(Move(yuv_scoped),
+                        buffer_size.value(),
+                        size.width,
+                        size.height,
+                        mozilla::kVideoI420,
+                        0);
   }
 
   Atomic<int32_t, Relaxed> mLength;
   RefPtr<TaskQueue> mTaskQueue;
 
   // Written and read from the queueing thread (normally MSG).
-  int32_t last_img_; // serial number of last Image
+  int32_t last_img_;              // serial number of last Image
   TimeStamp disabled_frame_sent_; // The time we sent the last disabled frame.
 #ifdef DEBUG
   uint32_t mThrottleCount;
   uint32_t mThrottleRecord;
 #endif
 
   // mMutex guards the below variables.
   Mutex mMutex;
@@ -451,108 +495,117 @@ protected:
 // on the MSG/input audio thread.  Basically just bounces all the audio
 // data to a single audio processing/input queue.  We could if we wanted to
 // use multiple threads and a TaskQueue.
 class AudioProxyThread
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioProxyThread)
 
-  explicit AudioProxyThread(AudioSessionConduit *aConduit)
+  explicit AudioProxyThread(AudioSessionConduit* aConduit)
     : mConduit(aConduit)
   {
     MOZ_ASSERT(mConduit);
     MOZ_COUNT_CTOR(AudioProxyThread);
 
     // Use only 1 thread; also forces FIFO operation
     // We could use multiple threads, but that may be dicier with the webrtc.org
     // code.  If so we'd need to use TaskQueues like the videoframe converter
     RefPtr<SharedThreadPool> pool =
       SharedThreadPool::Get(NS_LITERAL_CSTRING("AudioProxy"), 1);
 
     mThread = pool.get();
   }
 
   // called on mThread
-  void InternalProcessAudioChunk(
-    TrackRate rate,
-    AudioChunk& chunk,
-    bool enabled) {
+  void InternalProcessAudioChunk(TrackRate rate,
+                                 AudioChunk& chunk,
+                                 bool enabled)
+  {
 
     // Convert to interleaved, 16-bits integer audio, with a maximum of two
     // channels (since the WebRTC.org code below makes the assumption that the
     // input audio is either mono or stereo).
     uint32_t outputChannels = chunk.ChannelCount() == 1 ? 1 : 2;
     const int16_t* samples = nullptr;
     UniquePtr<int16_t[]> convertedSamples;
 
-    // We take advantage of the fact that the common case (microphone directly to
-    // PeerConnection, that is, a normal call), the samples are already 16-bits
-    // mono, so the representation in interleaved and planar is the same, and we
-    // can just use that.
-    if (enabled && outputChannels == 1 && chunk.mBufferFormat == AUDIO_FORMAT_S16) {
+    // We take advantage of the fact that in the common case (microphone
+    // directly to PeerConnection, that is, a normal call) the samples are
+    // already 16-bit mono, so the representation in interleaved and planar is
+    // the same, and we can just use that.
+    if (enabled && outputChannels == 1 &&
+        chunk.mBufferFormat == AUDIO_FORMAT_S16) {
       samples = chunk.ChannelData<int16_t>().Elements()[0];
     } else {
-      convertedSamples = MakeUnique<int16_t[]>(chunk.mDuration * outputChannels);
+      convertedSamples =
+        MakeUnique<int16_t[]>(chunk.mDuration * outputChannels);
 
       if (!enabled || chunk.mBufferFormat == AUDIO_FORMAT_SILENCE) {
         PodZero(convertedSamples.get(), chunk.mDuration * outputChannels);
       } else if (chunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
         DownmixAndInterleave(chunk.ChannelData<float>(),
-                             chunk.mDuration, chunk.mVolume, outputChannels,
+                             chunk.mDuration,
+                             chunk.mVolume,
+                             outputChannels,
                              convertedSamples.get());
       } else if (chunk.mBufferFormat == AUDIO_FORMAT_S16) {
         DownmixAndInterleave(chunk.ChannelData<int16_t>(),
-                             chunk.mDuration, chunk.mVolume, outputChannels,
+                             chunk.mDuration,
+                             chunk.mVolume,
+                             outputChannels,
                              convertedSamples.get());
       }
       samples = convertedSamples.get();
     }
 
-    MOZ_ASSERT(!(rate%100)); // rate should be a multiple of 100
+    MOZ_ASSERT(!(rate % 100)); // rate should be a multiple of 100
 
-    // Check if the rate or the number of channels has changed since the last time
-    // we came through. I realize it may be overkill to check if the rate has
-    // changed, but I believe it is possible (e.g. if we change sources) and it
-    // costs us very little to handle this case.
+    // Check if the rate or the number of channels has changed since the last
+    // time we came through. I realize it may be overkill to check if the rate
+    // has changed, but I believe it is possible (e.g. if we change sources) and
+    // it costs us very little to handle this case.
 
     uint32_t audio_10ms = rate / 100;
 
-    if (!packetizer_ ||
-        packetizer_->PacketSize() != audio_10ms ||
+    if (!packetizer_ || packetizer_->PacketSize() != audio_10ms ||
         packetizer_->Channels() != outputChannels) {
       // It's ok to drop the audio still in the packetizer here.
-      packetizer_ = new AudioPacketizer<int16_t, int16_t>(audio_10ms, outputChannels);
+      packetizer_ =
+        new AudioPacketizer<int16_t, int16_t>(audio_10ms, outputChannels);
     }
 
     packetizer_->Input(samples, chunk.mDuration);
 
     while (packetizer_->PacketsAvailable()) {
       packetizer_->Output(packet_);
-      mConduit->SendAudioFrame(packet_, packetizer_->PacketSize(), rate, packetizer_->Channels(), 0);
+      mConduit->SendAudioFrame(
+        packet_, packetizer_->PacketSize(), rate, packetizer_->Channels(), 0);
     }
   }
 
   void QueueAudioChunk(TrackRate rate, AudioChunk& chunk, bool enabled)
   {
     RUN_ON_THREAD(mThread,
                   WrapRunnable(RefPtr<AudioProxyThread>(this),
                                &AudioProxyThread::InternalProcessAudioChunk,
-                               rate, chunk, enabled),
+                               rate,
+                               chunk,
+                               enabled),
                   NS_DISPATCH_NORMAL);
   }
 
 protected:
   virtual ~AudioProxyThread()
   {
-    // Conduits must be released on MainThread, and we might have the last reference
-    // We don't need to worry about runnables still trying to access the conduit, since
-    // the runnables hold a ref to AudioProxyThread.
-    NS_ReleaseOnMainThreadSystemGroup(
-      "AudioProxyThread::mConduit", mConduit.forget());
+    // Conduits must be released on MainThread, and we might have the last
+    // reference. We don't need to worry about runnables still trying to access
+    // the conduit, since the runnables hold a ref to AudioProxyThread.
+    NS_ReleaseOnMainThreadSystemGroup("AudioProxyThread::mConduit",
+                                      mConduit.forget());
     MOZ_COUNT_DTOR(AudioProxyThread);
   }
 
   RefPtr<AudioSessionConduit> mConduit;
   nsCOMPtr<nsIEventTarget> mThread;
   // Only accessed on mThread
   nsAutoPtr<AudioPacketizer<int16_t, int16_t>> packetizer_;
   // A buffer to hold a single packet of audio.
@@ -561,62 +614,63 @@ protected:
 
 static char kDTLSExporterLabel[] = "EXTRACTOR-dtls_srtp";
 
 MediaPipeline::MediaPipeline(const std::string& pc,
                              Direction direction,
                              nsCOMPtr<nsIEventTarget> main_thread,
                              nsCOMPtr<nsIEventTarget> sts_thread,
                              RefPtr<MediaSessionConduit> conduit)
-  : direction_(direction),
-    level_(0),
-    conduit_(conduit),
-    rtp_(nullptr, RTP),
-    rtcp_(nullptr, RTCP),
-    main_thread_(main_thread),
-    sts_thread_(sts_thread),
-    rtp_packets_sent_(0),
-    rtcp_packets_sent_(0),
-    rtp_packets_received_(0),
-    rtcp_packets_received_(0),
-    rtp_bytes_sent_(0),
-    rtp_bytes_received_(0),
-    pc_(pc),
-    description_(),
-    rtp_parser_(webrtc::RtpHeaderParser::Create()){
+  : direction_(direction)
+  , level_(0)
+  , conduit_(conduit)
+  , rtp_(nullptr, RTP)
+  , rtcp_(nullptr, RTCP)
+  , main_thread_(main_thread)
+  , sts_thread_(sts_thread)
+  , rtp_packets_sent_(0)
+  , rtcp_packets_sent_(0)
+  , rtp_packets_received_(0)
+  , rtcp_packets_received_(0)
+  , rtp_bytes_sent_(0)
+  , rtp_bytes_received_(0)
+  , pc_(pc)
+  , description_()
+  , rtp_parser_(webrtc::RtpHeaderParser::Create())
+{
   // PipelineTransport() will access this->sts_thread_; moved here for safety
   transport_ = new PipelineTransport(this);
   packet_dumper_ = new PacketDumper(pc_);
 
   if (direction_ == RECEIVE) {
     conduit_->SetReceiverTransport(transport_);
   } else {
     conduit_->SetTransmitterTransport(transport_);
   }
 }
 
-MediaPipeline::~MediaPipeline() {
+MediaPipeline::~MediaPipeline()
+{
   CSFLogInfo(LOGTAG, "Destroying MediaPipeline: %s", description_.c_str());
   // MediaSessionConduit insists that it be released on main.
-  RUN_ON_THREAD(main_thread_, WrapRelease(conduit_.forget()),
-      NS_DISPATCH_NORMAL);
+  RUN_ON_THREAD(
+    main_thread_, WrapRelease(conduit_.forget()), NS_DISPATCH_NORMAL);
 }
 
 void
 MediaPipeline::Shutdown_m()
 {
   CSFLogInfo(LOGTAG, "%s in %s", description_.c_str(), __FUNCTION__);
 
   Stop();
   DetachMedia();
 
   RUN_ON_THREAD(sts_thread_,
-                WrapRunnable(
-                    RefPtr<MediaPipeline>(this),
-                    &MediaPipeline::DetachTransport_s),
+                WrapRunnable(RefPtr<MediaPipeline>(this),
+                             &MediaPipeline::DetachTransport_s),
                 NS_DISPATCH_NORMAL);
 }
 
 void
 MediaPipeline::DetachTransport_s()
 {
   ASSERT_ON_THREAD(sts_thread_);
 
@@ -654,22 +708,21 @@ MediaPipeline::AttachTransport_s()
 }
 
 void
 MediaPipeline::UpdateTransport_m(RefPtr<TransportFlow> rtp_transport,
                                  RefPtr<TransportFlow> rtcp_transport,
                                  nsAutoPtr<MediaPipelineFilter> filter)
 {
   RUN_ON_THREAD(sts_thread_,
-                WrapRunnable(
-                    RefPtr<MediaPipeline>(this),
-                    &MediaPipeline::UpdateTransport_s,
-                    rtp_transport,
-                    rtcp_transport,
-                    filter),
+                WrapRunnable(RefPtr<MediaPipeline>(this),
+                             &MediaPipeline::UpdateTransport_s,
+                             rtp_transport,
+                             rtcp_transport,
+                             filter),
                 NS_DISPATCH_NORMAL);
 }
 
 void
 MediaPipeline::UpdateTransport_s(RefPtr<TransportFlow> rtp_transport,
                                  RefPtr<TransportFlow> rtcp_transport,
                                  nsAutoPtr<MediaPipelineFilter> filter)
 {
@@ -732,273 +785,319 @@ void
 MediaPipeline::AddRIDFilter_s(const std::string& rid)
 {
   filter_ = new MediaPipelineFilter;
   filter_->AddRemoteRtpStreamId(rid);
 }
 
 void
 MediaPipeline::GetContributingSourceStats(
-    const nsString& aInboundRtpStreamId,
-    FallibleTArray<dom::RTCRTPContributingSourceStats>& aArr) const
+  const nsString& aInboundRtpStreamId,
+  FallibleTArray<dom::RTCRTPContributingSourceStats>& aArr) const
 {
   // Get the expiry from now
   DOMHighResTimeStamp expiry = RtpCSRCStats::GetExpiryFromTime(GetNow());
   for (auto info : csrc_stats_) {
     if (!info.second.Expired(expiry)) {
       RTCRTPContributingSourceStats stats;
       info.second.GetWebidlInstance(stats, aInboundRtpStreamId);
       aArr.AppendElement(stats, fallible);
     }
   }
 }
 
-void MediaPipeline::StateChange(TransportFlow *flow, TransportLayer::State state) {
+void
+MediaPipeline::StateChange(TransportFlow* flow, TransportLayer::State state)
+{
   TransportInfo* info = GetTransportInfo_s(flow);
   MOZ_ASSERT(info);
 
   if (state == TransportLayer::TS_OPEN) {
     CSFLogInfo(LOGTAG, "Flow is ready");
     TransportReady_s(*info);
   } else if (state == TransportLayer::TS_CLOSED ||
              state == TransportLayer::TS_ERROR) {
     TransportFailed_s(*info);
   }
 }
 
-static bool MakeRtpTypeToStringArray(const char** array) {
+static bool
+MakeRtpTypeToStringArray(const char** array)
+{
   static const char* RTP_str = "RTP";
   static const char* RTCP_str = "RTCP";
   static const char* MUX_str = "RTP/RTCP mux";
   array[MediaPipeline::RTP] = RTP_str;
   array[MediaPipeline::RTCP] = RTCP_str;
   array[MediaPipeline::MUX] = MUX_str;
   return true;
 }
 
-static const char* ToString(MediaPipeline::RtpType type) {
-  static const char* array[(int)MediaPipeline::MAX_RTP_TYPE] = {nullptr};
+static const char*
+ToString(MediaPipeline::RtpType type)
+{
+  static const char* array[(int)MediaPipeline::MAX_RTP_TYPE] = { nullptr };
   // Dummy variable to cause init to happen only on first call
   static bool dummy = MakeRtpTypeToStringArray(array);
   (void)dummy;
   return array[type];
 }
 
-nsresult MediaPipeline::TransportReady_s(TransportInfo &info) {
+nsresult
+MediaPipeline::TransportReady_s(TransportInfo& info)
+{
   MOZ_ASSERT(!description_.empty());
 
   // TODO(ekr@rtfm.com): implement some kind of notification on
   // failure. bug 852665.
   if (info.state_ != MP_CONNECTING) {
-    CSFLogError(LOGTAG, "Transport ready for flow in wrong state:%s :%s",
-                description_.c_str(), ToString(info.type_));
+    CSFLogError(LOGTAG,
+                "Transport ready for flow in wrong state:%s :%s",
+                description_.c_str(),
+                ToString(info.type_));
     return NS_ERROR_FAILURE;
   }
 
-  CSFLogInfo(LOGTAG, "Transport ready for pipeline %p flow %s: %s", this,
-             description_.c_str(), ToString(info.type_));
+  CSFLogInfo(LOGTAG,
+             "Transport ready for pipeline %p flow %s: %s",
+             this,
+             description_.c_str(),
+             ToString(info.type_));
 
   // TODO(bcampen@mozilla.com): Should we disconnect from the flow on failure?
   nsresult res;
 
   // Now instantiate the SRTP objects
-  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
-      info.transport_->GetLayer(TransportLayerDtls::ID()));
-  MOZ_ASSERT(dtls);  // DTLS is mandatory
+  TransportLayerDtls* dtls = static_cast<TransportLayerDtls*>(
+    info.transport_->GetLayer(TransportLayerDtls::ID()));
+  MOZ_ASSERT(dtls); // DTLS is mandatory
 
   uint16_t cipher_suite;
   res = dtls->GetSrtpCipher(&cipher_suite);
   if (NS_FAILED(res)) {
     CSFLogError(LOGTAG, "Failed to negotiate DTLS-SRTP. This is an error");
     info.state_ = MP_CLOSED;
     UpdateRtcpMuxState(info);
     return res;
   }
 
   // SRTP Key Exporter as per RFC 5764 S 4.2
   unsigned char srtp_block[SRTP_TOTAL_KEY_LENGTH * 2];
-  res = dtls->ExportKeyingMaterial(kDTLSExporterLabel, false, "",
-                                   srtp_block, sizeof(srtp_block));
+  res = dtls->ExportKeyingMaterial(
+    kDTLSExporterLabel, false, "", srtp_block, sizeof(srtp_block));
   if (NS_FAILED(res)) {
     CSFLogError(LOGTAG, "Failed to compute DTLS-SRTP keys. This is an error");
     info.state_ = MP_CLOSED;
     UpdateRtcpMuxState(info);
-    MOZ_CRASH();  // TODO: Remove once we have enough field experience to
-                  // know it doesn't happen. bug 798797. Note that the
-                  // code after this never executes.
+    MOZ_CRASH(); // TODO: Remove once we have enough field experience to
+                 // know it doesn't happen. bug 798797. Note that the
+                 // code after this never executes.
     return res;
   }
 
   // Slice and dice as per RFC 5764 S 4.2
   unsigned char client_write_key[SRTP_TOTAL_KEY_LENGTH];
   unsigned char server_write_key[SRTP_TOTAL_KEY_LENGTH];
   int offset = 0;
   memcpy(client_write_key, srtp_block + offset, SRTP_MASTER_KEY_LENGTH);
   offset += SRTP_MASTER_KEY_LENGTH;
   memcpy(server_write_key, srtp_block + offset, SRTP_MASTER_KEY_LENGTH);
   offset += SRTP_MASTER_KEY_LENGTH;
   memcpy(client_write_key + SRTP_MASTER_KEY_LENGTH,
-         srtp_block + offset, SRTP_MASTER_SALT_LENGTH);
+         srtp_block + offset,
+         SRTP_MASTER_SALT_LENGTH);
   offset += SRTP_MASTER_SALT_LENGTH;
   memcpy(server_write_key + SRTP_MASTER_KEY_LENGTH,
-         srtp_block + offset, SRTP_MASTER_SALT_LENGTH);
+         srtp_block + offset,
+         SRTP_MASTER_SALT_LENGTH);
   offset += SRTP_MASTER_SALT_LENGTH;
   MOZ_ASSERT(offset == sizeof(srtp_block));
 
-  unsigned char *write_key;
-  unsigned char *read_key;
+  unsigned char* write_key;
+  unsigned char* read_key;
 
   if (dtls->role() == TransportLayerDtls::CLIENT) {
     write_key = client_write_key;
     read_key = server_write_key;
   } else {
     write_key = server_write_key;
     read_key = client_write_key;
   }
 
   MOZ_ASSERT(!info.send_srtp_ && !info.recv_srtp_);
-  info.send_srtp_ = SrtpFlow::Create(cipher_suite, false, write_key,
-                                     SRTP_TOTAL_KEY_LENGTH);
-  info.recv_srtp_ = SrtpFlow::Create(cipher_suite, true, read_key,
-                                     SRTP_TOTAL_KEY_LENGTH);
+  info.send_srtp_ =
+    SrtpFlow::Create(cipher_suite, false, write_key, SRTP_TOTAL_KEY_LENGTH);
+  info.recv_srtp_ =
+    SrtpFlow::Create(cipher_suite, true, read_key, SRTP_TOTAL_KEY_LENGTH);
   if (!info.send_srtp_ || !info.recv_srtp_) {
-    CSFLogError(LOGTAG, "Couldn't create SRTP flow for %s",
-                ToString(info.type_));
+    CSFLogError(
+      LOGTAG, "Couldn't create SRTP flow for %s", ToString(info.type_));
     info.state_ = MP_CLOSED;
     UpdateRtcpMuxState(info);
     return NS_ERROR_FAILURE;
   }
 
   if (direction_ == RECEIVE) {
-    CSFLogInfo(LOGTAG, "Listening for %s packets received on %p",
-               ToString(info.type_), dtls->downward());
+    CSFLogInfo(LOGTAG,
+               "Listening for %s packets received on %p",
+               ToString(info.type_),
+               dtls->downward());
 
     switch (info.type_) {
       case RTP:
         dtls->downward()->SignalPacketReceived.connect(
-            this,
-            &MediaPipeline::RtpPacketReceived);
+          this, &MediaPipeline::RtpPacketReceived);
         break;
       case RTCP:
         dtls->downward()->SignalPacketReceived.connect(
-            this,
-            &MediaPipeline::RtcpPacketReceived);
+          this, &MediaPipeline::RtcpPacketReceived);
         break;
       case MUX:
         dtls->downward()->SignalPacketReceived.connect(
-            this,
-            &MediaPipeline::PacketReceived);
+          this, &MediaPipeline::PacketReceived);
         break;
       default:
         MOZ_CRASH();
     }
   }
 
   info.state_ = MP_OPEN;
   UpdateRtcpMuxState(info);
   return NS_OK;
 }
 
-nsresult MediaPipeline::TransportFailed_s(TransportInfo &info) {
+nsresult
+MediaPipeline::TransportFailed_s(TransportInfo& info)
+{
   ASSERT_ON_THREAD(sts_thread_);
 
   info.state_ = MP_CLOSED;
   UpdateRtcpMuxState(info);
 
   CSFLogInfo(LOGTAG, "Transport closed for flow %s", ToString(info.type_));
 
   NS_WARNING(
-      "MediaPipeline Transport failed. This is not properly cleaned up yet");
+    "MediaPipeline Transport failed. This is not properly cleaned up yet");
 
   // TODO(ekr@rtfm.com): SECURITY: Figure out how to clean up if the
   // connection was good and now it is bad.
   // TODO(ekr@rtfm.com): Report up so that the PC knows we
   // have experienced an error.
 
   return NS_OK;
 }
 
-void MediaPipeline::UpdateRtcpMuxState(TransportInfo &info) {
+void
+MediaPipeline::UpdateRtcpMuxState(TransportInfo& info)
+{
   if (info.type_ == MUX) {
     if (info.transport_ == rtcp_.transport_) {
       rtcp_.state_ = info.state_;
       if (!rtcp_.send_srtp_) {
         rtcp_.send_srtp_ = info.send_srtp_;
         rtcp_.recv_srtp_ = info.recv_srtp_;
       }
     }
   }
 }
 
-nsresult MediaPipeline::SendPacket(TransportFlow *flow, const void *data,
-                                   int len) {
+nsresult
+MediaPipeline::SendPacket(TransportFlow* flow, const void* data, int len)
+{
   ASSERT_ON_THREAD(sts_thread_);
 
   // Note that we bypass the DTLS layer here
-  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
-      flow->GetLayer(TransportLayerDtls::ID()));
+  TransportLayerDtls* dtls =
+    static_cast<TransportLayerDtls*>(flow->GetLayer(TransportLayerDtls::ID()));
   MOZ_ASSERT(dtls);
 
-  TransportResult res = dtls->downward()->
-      SendPacket(static_cast<const unsigned char *>(data), len);
+  TransportResult res =
+    dtls->downward()->SendPacket(static_cast<const unsigned char*>(data), len);
 
   if (res != len) {
     // Ignore blocking indications
     if (res == TE_WOULDBLOCK)
       return NS_OK;
 
     CSFLogError(LOGTAG, "Failed write on stream %s", description_.c_str());
     return NS_BASE_STREAM_CLOSED;
   }
 
   return NS_OK;
 }
 
-void MediaPipeline::increment_rtp_packets_sent(int32_t bytes) {
+void
+MediaPipeline::increment_rtp_packets_sent(int32_t bytes)
+{
   ++rtp_packets_sent_;
   rtp_bytes_sent_ += bytes;
 
   if (!(rtp_packets_sent_ % 100)) {
-    CSFLogInfo(LOGTAG, "RTP sent packet count for %s Pipeline %p Flow: %p: %u (%" PRId64 " bytes)",
-               description_.c_str(), this, static_cast<void *>(rtp_.transport_),
-               rtp_packets_sent_, rtp_bytes_sent_);
+    CSFLogInfo(LOGTAG,
+               "RTP sent packet count for %s Pipeline %p Flow: %p: %u (%" PRId64
+               " bytes)",
+               description_.c_str(),
+               this,
+               static_cast<void*>(rtp_.transport_),
+               rtp_packets_sent_,
+               rtp_bytes_sent_);
   }
 }
 
-void MediaPipeline::increment_rtcp_packets_sent() {
+void
+MediaPipeline::increment_rtcp_packets_sent()
+{
   ++rtcp_packets_sent_;
   if (!(rtcp_packets_sent_ % 100)) {
-    CSFLogInfo(LOGTAG, "RTCP sent packet count for %s Pipeline %p Flow: %p: %u",
-               description_.c_str(), this, static_cast<void *>(rtp_.transport_),
+    CSFLogInfo(LOGTAG,
+               "RTCP sent packet count for %s Pipeline %p Flow: %p: %u",
+               description_.c_str(),
+               this,
+               static_cast<void*>(rtp_.transport_),
                rtcp_packets_sent_);
   }
 }
 
-void MediaPipeline::increment_rtp_packets_received(int32_t bytes) {
+void
+MediaPipeline::increment_rtp_packets_received(int32_t bytes)
+{
   ++rtp_packets_received_;
   rtp_bytes_received_ += bytes;
   if (!(rtp_packets_received_ % 100)) {
-    CSFLogInfo(LOGTAG, "RTP received packet count for %s Pipeline %p Flow: %p: %u (%" PRId64 " bytes)",
-               description_.c_str(), this, static_cast<void *>(rtp_.transport_),
-               rtp_packets_received_, rtp_bytes_received_);
+    CSFLogInfo(
+      LOGTAG,
+      "RTP received packet count for %s Pipeline %p Flow: %p: %u (%" PRId64
+      " bytes)",
+      description_.c_str(),
+      this,
+      static_cast<void*>(rtp_.transport_),
+      rtp_packets_received_,
+      rtp_bytes_received_);
   }
 }
 
-void MediaPipeline::increment_rtcp_packets_received() {
+void
+MediaPipeline::increment_rtcp_packets_received()
+{
   ++rtcp_packets_received_;
   if (!(rtcp_packets_received_ % 100)) {
-    CSFLogInfo(LOGTAG, "RTCP received packet count for %s Pipeline %p Flow: %p: %u",
-               description_.c_str(), this, static_cast<void *>(rtp_.transport_),
+    CSFLogInfo(LOGTAG,
+               "RTCP received packet count for %s Pipeline %p Flow: %p: %u",
+               description_.c_str(),
+               this,
+               static_cast<void*>(rtp_.transport_),
                rtcp_packets_received_);
   }
 }
 
-void MediaPipeline::RtpPacketReceived(TransportLayer *layer,
-                                      const unsigned char *data,
-                                      size_t len) {
+void
+MediaPipeline::RtpPacketReceived(TransportLayer* layer,
+                                 const unsigned char* data,
+                                 size_t len)
+{
   if (direction_ == TRANSMIT) {
     return;
   }
 
   if (!transport_->pipeline()) {
     CSFLogError(LOGTAG, "Discarding incoming packet; transport disconnected");
     return;
   }
@@ -1064,62 +1163,68 @@ void MediaPipeline::RtpPacketReceived(Tr
   if (header.numCSRCs) {
     for (auto i = 0; i < header.numCSRCs; i++) {
       if (!hasTime) {
         now = GetNow();
         hasTime = true;
       }
       auto csrcInfo = csrc_stats_.find(header.arrOfCSRCs[i]);
       if (csrcInfo == csrc_stats_.end()) {
-        csrc_stats_.insert(std::make_pair(header.arrOfCSRCs[i],
-            RtpCSRCStats(header.arrOfCSRCs[i],now)));
+        csrc_stats_.insert(std::make_pair(
+          header.arrOfCSRCs[i], RtpCSRCStats(header.arrOfCSRCs[i], now)));
       } else {
         csrcInfo->second.SetTimestamp(now);
       }
     }
   }
 
-  packet_dumper_->Dump(
-      level_, dom::mozPacketDumpType::Srtp, false, data, len);
+  packet_dumper_->Dump(level_, dom::mozPacketDumpType::Srtp, false, data, len);
 
   // Make a copy rather than cast away constness
   auto inner_data = MakeUnique<unsigned char[]>(len);
   memcpy(inner_data.get(), data, len);
   int out_len = 0;
-  nsresult res = rtp_.recv_srtp_->UnprotectRtp(inner_data.get(),
-                                               len, len, &out_len);
+  nsresult res =
+    rtp_.recv_srtp_->UnprotectRtp(inner_data.get(), len, len, &out_len);
   if (!NS_SUCCEEDED(res)) {
     char tmp[16];
 
-    SprintfLiteral(tmp, "%.2x %.2x %.2x %.2x",
+    SprintfLiteral(tmp,
+                   "%.2x %.2x %.2x %.2x",
                    inner_data[0],
                    inner_data[1],
                    inner_data[2],
                    inner_data[3]);
 
-    CSFLogError(LOGTAG, "Error unprotecting RTP in %s len= %zu [%s]",
-                description_.c_str(), len, tmp);
+    CSFLogError(LOGTAG,
+                "Error unprotecting RTP in %s len= %zu [%s]",
+                description_.c_str(),
+                len,
+                tmp);
     return;
   }
   CSFLogDebug(LOGTAG, "%s received RTP packet.", description_.c_str());
   increment_rtp_packets_received(out_len);
   OnRtpPacketReceived();
 
-  RtpLogger::LogPacket(inner_data.get(), out_len, true, true, header.headerLength,
-                       description_);
+  RtpLogger::LogPacket(
+    inner_data.get(), out_len, true, true, header.headerLength, description_);
 
   packet_dumper_->Dump(
-      level_, dom::mozPacketDumpType::Rtp, false, inner_data.get(), out_len);
+    level_, dom::mozPacketDumpType::Rtp, false, inner_data.get(), out_len);
 
-  (void)conduit_->ReceivedRTPPacket(inner_data.get(), out_len, header.ssrc);  // Ignore error codes
+  (void)conduit_->ReceivedRTPPacket(
+    inner_data.get(), out_len, header.ssrc); // Ignore error codes
 }
 
-void MediaPipeline::RtcpPacketReceived(TransportLayer *layer,
-                                       const unsigned char *data,
-                                       size_t len) {
+void
+MediaPipeline::RtcpPacketReceived(TransportLayer* layer,
+                                  const unsigned char* data,
+                                  size_t len)
+{
   if (!transport_->pipeline()) {
     CSFLogDebug(LOGTAG, "Discarding incoming packet; transport disconnected");
     return;
   }
 
   if (!conduit_) {
     CSFLogDebug(LOGTAG, "Discarding incoming packet; media disconnected");
     return;
@@ -1147,116 +1252,117 @@ void MediaPipeline::RtcpPacketReceived(T
   // We do not filter receiver reports, since the webrtc.org code for
   // senders already has logic to ignore RRs that do not apply.
   // TODO bug 1279153: remove SR check for reduced size RTCP
   if (filter_ && !filter_->FilterSenderReport(data, len)) {
     CSFLogWarn(LOGTAG, "Dropping incoming RTCP packet; filtered out");
     return;
   }
 
-  packet_dumper_->Dump(
-      level_, dom::mozPacketDumpType::Srtcp, false, data, len);
+  packet_dumper_->Dump(level_, dom::mozPacketDumpType::Srtcp, false, data, len);
 
   // Make a copy rather than cast away constness
   auto inner_data = MakeUnique<unsigned char[]>(len);
   memcpy(inner_data.get(), data, len);
   int out_len;
 
-  nsresult res = rtcp_.recv_srtp_->UnprotectRtcp(inner_data.get(),
-                                                 len,
-                                                 len,
-                                                 &out_len);
+  nsresult res =
+    rtcp_.recv_srtp_->UnprotectRtcp(inner_data.get(), len, len, &out_len);
 
   if (!NS_SUCCEEDED(res))
     return;
 
   CSFLogDebug(LOGTAG, "%s received RTCP packet.", description_.c_str());
   increment_rtcp_packets_received();
 
   RtpLogger::LogPacket(inner_data.get(), out_len, true, false, 0, description_);
 
-  packet_dumper_->Dump(
-      level_, dom::mozPacketDumpType::Rtcp, false, data, len);
+  packet_dumper_->Dump(level_, dom::mozPacketDumpType::Rtcp, false, data, len);
 
-  MOZ_ASSERT(rtcp_.recv_srtp_);  // This should never happen
+  MOZ_ASSERT(rtcp_.recv_srtp_); // This should never happen
 
-  (void)conduit_->ReceivedRTCPPacket(inner_data.get(), out_len);  // Ignore error codes
+  (void)conduit_->ReceivedRTCPPacket(inner_data.get(),
+                                     out_len); // Ignore error codes
 }
 
-bool MediaPipeline::IsRtp(const unsigned char *data, size_t len) {
+bool
+MediaPipeline::IsRtp(const unsigned char* data, size_t len)
+{
   if (len < 2)
     return false;
 
   // Check if this is a RTCP packet. Logic based on the types listed in
   // media/webrtc/trunk/src/modules/rtp_rtcp/source/rtp_utility.cc
 
   // Anything outside this range is RTP.
   if ((data[1] < 192) || (data[1] > 207))
     return true;
 
-  if (data[1] == 192)  // FIR
+  if (data[1] == 192) // FIR
     return false;
 
-  if (data[1] == 193)  // NACK, but could also be RTP. This makes us sad
-    return true;       // but it's how webrtc.org behaves.
+  if (data[1] == 193) // NACK, but could also be RTP. This makes us sad
+    return true;      // but it's how webrtc.org behaves.
 
   if (data[1] == 194)
     return true;
 
-  if (data[1] == 195)  // IJ.
+  if (data[1] == 195) // IJ.
     return false;
 
-  if ((data[1] > 195) && (data[1] < 200))  // the > 195 is redundant
+  if ((data[1] > 195) && (data[1] < 200)) // the > 195 is redundant
     return true;
 
-  if ((data[1] >= 200) && (data[1] <= 207))  // SR, RR, SDES, BYE,
-    return false;                            // APP, RTPFB, PSFB, XR
+  if ((data[1] >= 200) && (data[1] <= 207)) // SR, RR, SDES, BYE,
+    return false;                           // APP, RTPFB, PSFB, XR
 
-  MOZ_ASSERT(false);  // Not reached, belt and suspenders.
+  MOZ_ASSERT(false); // Not reached, belt and suspenders.
   return true;
 }
 
-void MediaPipeline::PacketReceived(TransportLayer *layer,
-                                   const unsigned char *data,
-                                   size_t len) {
+void
+MediaPipeline::PacketReceived(TransportLayer* layer,
+                              const unsigned char* data,
+                              size_t len)
+{
   if (!transport_->pipeline()) {
     CSFLogDebug(LOGTAG, "Discarding incoming packet; transport disconnected");
     return;
   }
 
   if (IsRtp(data, len)) {
     RtpPacketReceived(layer, data, len);
   } else {
     RtcpPacketReceived(layer, data, len);
   }
 }
 
-class MediaPipelineTransmit::PipelineListener
-  : public MediaStreamVideoSink
+class MediaPipelineTransmit::PipelineListener : public MediaStreamVideoSink
 {
-friend class MediaPipelineTransmit;
+  friend class MediaPipelineTransmit;
+
 public:
   explicit PipelineListener(const RefPtr<MediaSessionConduit>& conduit)
-    : conduit_(conduit),
-      track_id_(TRACK_INVALID),
-      mMutex("MediaPipelineTransmit::PipelineListener"),
-      track_id_external_(TRACK_INVALID),
-      active_(false),
-      enabled_(false),
-      direct_connect_(false)
+    : conduit_(conduit)
+    , track_id_(TRACK_INVALID)
+    , mMutex("MediaPipelineTransmit::PipelineListener")
+    , track_id_external_(TRACK_INVALID)
+    , active_(false)
+    , enabled_(false)
+    , direct_connect_(false)
   {
   }
 
   ~PipelineListener()
   {
     if (!NS_IsMainThread()) {
       // release conduit on mainthread.  Must use forget()!
-      nsresult rv = NS_DispatchToMainThread(new
-                                            ConduitDeleteEvent(conduit_.forget()));
-      MOZ_ASSERT(!NS_FAILED(rv),"Could not dispatch conduit shutdown to main");
+      nsresult rv =
+        NS_DispatchToMainThread(new ConduitDeleteEvent(conduit_.forget()));
+      MOZ_ASSERT(!NS_FAILED(rv), "Could not dispatch conduit shutdown to main");
       if (NS_FAILED(rv)) {
         MOZ_CRASH();
       }
     } else {
       conduit_ = nullptr;
     }
     if (converter_) {
       converter_->Shutdown();
@@ -1287,24 +1393,30 @@ public:
   void OnVideoFrameConverted(unsigned char* aVideoFrame,
                              unsigned int aVideoFrameLength,
                              unsigned short aWidth,
                              unsigned short aHeight,
                              VideoType aVideoType,
                              uint64_t aCaptureTime)
   {
     MOZ_RELEASE_ASSERT(conduit_->type() == MediaSessionConduit::VIDEO);
-    static_cast<VideoSessionConduit*>(conduit_.get())->SendVideoFrame(
-      aVideoFrame, aVideoFrameLength, aWidth, aHeight, aVideoType, aCaptureTime);
+    static_cast<VideoSessionConduit*>(conduit_.get())
+      ->SendVideoFrame(aVideoFrame,
+                       aVideoFrameLength,
+                       aWidth,
+                       aHeight,
+                       aVideoType,
+                       aCaptureTime);
   }
 
   void OnVideoFrameConverted(webrtc::VideoFrame& aVideoFrame)
   {
     MOZ_RELEASE_ASSERT(conduit_->type() == MediaSessionConduit::VIDEO);
-    static_cast<VideoSessionConduit*>(conduit_.get())->SendVideoFrame(aVideoFrame);
+    static_cast<VideoSessionConduit*>(conduit_.get())
+      ->SendVideoFrame(aVideoFrame);
   }
 
   // Implement MediaStreamTrackListener
   void NotifyQueuedChanges(MediaStreamGraph* aGraph,
                            StreamTime aTrackOffset,
                            const MediaSegment& aQueuedMedia) override;
 
   // Implement DirectMediaStreamTrackListener
@@ -1314,17 +1426,18 @@ public:
   void NotifyDirectListenerInstalled(InstallationResult aResult) override;
   void NotifyDirectListenerUninstalled() override;
 
   // Implement MediaStreamVideoSink
   void SetCurrentFrames(const VideoSegment& aSegment) override;
   void ClearFrames() override {}
 
 private:
-  void UnsetTrackIdImpl() {
+  void UnsetTrackIdImpl()
+  {
     MutexAutoLock lock(mMutex);
     track_id_ = track_id_external_ = TRACK_INVALID;
   }
 
   void NewData(const MediaSegment& media, TrackRate aRate = 0);
 
   RefPtr<MediaSessionConduit> conduit_;
   RefPtr<AudioProxyThread> audio_processing_;
@@ -1348,23 +1461,22 @@ private:
 };
 
 // Implements VideoConverterListener for MediaPipeline.
 //
 // We pass converted frames on to MediaPipelineTransmit::PipelineListener
 // where they are further forwarded to VideoConduit.
 // MediaPipelineTransmit calls Detach() during shutdown to ensure there is
 // no cyclic dependencies between us and PipelineListener.
-class MediaPipelineTransmit::VideoFrameFeeder
-  : public VideoConverterListener
+class MediaPipelineTransmit::VideoFrameFeeder : public VideoConverterListener
 {
 public:
   explicit VideoFrameFeeder(const RefPtr<PipelineListener>& listener)
-    : listener_(listener),
-      mutex_("VideoFrameFeeder")
+    : listener_(listener)
+    , mutex_("VideoFrameFeeder")
   {
     MOZ_COUNT_CTOR(VideoFrameFeeder);
   }
 
   void Detach()
   {
     MutexAutoLock lock(mutex_);
 
@@ -1379,60 +1491,61 @@ public:
                              uint64_t aCaptureTime) override
   {
     MutexAutoLock lock(mutex_);
 
     if (!listener_) {
       return;
     }
 
-    listener_->OnVideoFrameConverted(aVideoFrame, aVideoFrameLength,
-                                     aWidth, aHeight, aVideoType, aCaptureTime);
+    listener_->OnVideoFrameConverted(aVideoFrame,
+                                     aVideoFrameLength,
+                                     aWidth,
+                                     aHeight,
+                                     aVideoType,
+                                     aCaptureTime);
   }
 
   void OnVideoFrameConverted(webrtc::VideoFrame& aVideoFrame) override
   {
     MutexAutoLock lock(mutex_);
 
     if (!listener_) {
       return;
     }
 
     listener_->OnVideoFrameConverted(aVideoFrame);
   }
 
 protected:
-  virtual ~VideoFrameFeeder()
-  {
-    MOZ_COUNT_DTOR(VideoFrameFeeder);
-  }
+  virtual ~VideoFrameFeeder() { MOZ_COUNT_DTOR(VideoFrameFeeder); }
 
   RefPtr<PipelineListener> listener_;
   Mutex mutex_;
 };
 
 MediaPipelineTransmit::MediaPipelineTransmit(
-    const std::string& pc,
-    nsCOMPtr<nsIEventTarget> main_thread,
-    nsCOMPtr<nsIEventTarget> sts_thread,
-    bool is_video,
-    dom::MediaStreamTrack* domtrack,
-    RefPtr<MediaSessionConduit> conduit) :
-  MediaPipeline(pc, TRANSMIT, main_thread, sts_thread, conduit),
-  listener_(new PipelineListener(conduit)),
-  is_video_(is_video),
-  domtrack_(domtrack),
-  transmitting_(false)
+  const std::string& pc,
+  nsCOMPtr<nsIEventTarget> main_thread,
+  nsCOMPtr<nsIEventTarget> sts_thread,
+  bool is_video,
+  dom::MediaStreamTrack* domtrack,
+  RefPtr<MediaSessionConduit> conduit)
+  : MediaPipeline(pc, TRANSMIT, main_thread, sts_thread, conduit)
+  , listener_(new PipelineListener(conduit))
+  , is_video_(is_video)
+  , domtrack_(domtrack)
+  , transmitting_(false)
 {
   SetDescription();
   if (!IsVideo()) {
-    audio_processing_ = MakeAndAddRef<AudioProxyThread>(static_cast<AudioSessionConduit*>(conduit.get()));
+    audio_processing_ = MakeAndAddRef<AudioProxyThread>(
+      static_cast<AudioSessionConduit*>(conduit.get()));
     listener_->SetAudioProxy(audio_processing_);
-  }
-  else { // Video
+  } else { // Video
     // For video we send frames to an async VideoFrameConverter that calls
     // back to a VideoFrameFeeder that feeds I420 frames to VideoConduit.
 
     feeder_ = MakeAndAddRef<VideoFrameFeeder>(listener_);
 
     converter_ = MakeAndAddRef<VideoFrameConverter>();
     converter_->AddListener(feeder_);
 
@@ -1444,34 +1557,39 @@ MediaPipelineTransmit::~MediaPipelineTra
 {
   if (feeder_) {
     feeder_->Detach();
   }
 
   MOZ_ASSERT(!domtrack_);
 }
 
-void MediaPipelineTransmit::SetDescription() {
+void
+MediaPipelineTransmit::SetDescription()
+{
   description_ = pc_ + "| ";
-  description_ += conduit_->type() == MediaSessionConduit::AUDIO ?
-      "Transmit audio[" : "Transmit video[";
+  description_ += conduit_->type() == MediaSessionConduit::AUDIO
+                    ? "Transmit audio["
+                    : "Transmit video[";
 
   if (!domtrack_) {
     description_ += "no track]";
     return;
   }
 
   nsString nsTrackId;
   domtrack_->GetId(nsTrackId);
   std::string track_id(NS_ConvertUTF16toUTF8(nsTrackId).get());
   description_ += track_id;
   description_ += "]";
 }
 
-void MediaPipelineTransmit::Stop() {
+void
+MediaPipelineTransmit::Stop()
+{
   ASSERT_ON_THREAD(main_thread_);
 
   if (!domtrack_ || !transmitting_) {
     return;
   }
 
   transmitting_ = false;
 
@@ -1482,65 +1600,72 @@ void MediaPipelineTransmit::Stop() {
     video->RemoveVideoOutput(listener_);
   } else {
     MOZ_ASSERT(false, "Unknown track type");
   }
 
   conduit_->StopTransmitting();
 }
 
-void MediaPipelineTransmit::Start() {
+void
+MediaPipelineTransmit::Start()
+{
   ASSERT_ON_THREAD(main_thread_);
 
   if (!domtrack_ || transmitting_) {
     return;
   }
 
   transmitting_ = true;
 
   conduit_->StartTransmitting();
 
   // TODO(ekr@rtfm.com): Check for errors
-  CSFLogDebug(LOGTAG, "Attaching pipeline to track %p conduit type=%s", this,
-              (conduit_->type() == MediaSessionConduit::AUDIO ?"audio":"video"));
+  CSFLogDebug(
+    LOGTAG,
+    "Attaching pipeline to track %p conduit type=%s",
+    this,
+    (conduit_->type() == MediaSessionConduit::AUDIO ? "audio" : "video"));
 
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
   // With full duplex we don't risk having audio come in late to the MSG
   // so we won't need a direct listener.
   const bool enableDirectListener =
     !Preferences::GetBool("media.navigator.audio.full_duplex", false);
 #else
   const bool enableDirectListener = true;
 #endif
 
   if (domtrack_->AsAudioStreamTrack()) {
     if (enableDirectListener) {
       // Register the Listener directly with the source if we can.
       // We also register it as a non-direct listener so we fall back to that
-      // if installing the direct listener fails. As a direct listener we get access
-      // to direct unqueued (and not resampled) data.
+      // if installing the direct listener fails. As a direct listener we get
+      // access to direct unqueued (and not resampled) data.
       domtrack_->AddDirectListener(listener_);
     }
     domtrack_->AddListener(listener_);
   } else if (VideoStreamTrack* video = domtrack_->AsVideoStreamTrack()) {
     video->AddVideoOutput(listener_);
   } else {
     MOZ_ASSERT(false, "Unknown track type");
   }
 }
 
 bool
 MediaPipelineTransmit::IsVideo() const
 {
   return is_video_;
 }
 
-void MediaPipelineTransmit::UpdateSinkIdentity_m(MediaStreamTrack* track,
-                                                 nsIPrincipal* principal,
-                                                 const PeerIdentity* sinkIdentity) {
+void
+MediaPipelineTransmit::UpdateSinkIdentity_m(MediaStreamTrack* track,
+                                            nsIPrincipal* principal,
+                                            const PeerIdentity* sinkIdentity)
+{
   ASSERT_ON_THREAD(main_thread_);
 
   if (track != nullptr && track != domtrack_) {
     // If a track is specified, then it might not be for this pipeline,
     // since we receive notifications for all tracks on the PC.
     // nullptr means that the PeerIdentity has changed and shall be applied
     // to all tracks of the PC.
     return;
@@ -1563,38 +1688,46 @@ void MediaPipelineTransmit::UpdateSinkId
 void
 MediaPipelineTransmit::DetachMedia()
 {
   ASSERT_ON_THREAD(main_thread_);
   domtrack_ = nullptr;
   // Let the listener be destroyed with the pipeline (or later).
 }
 
-nsresult MediaPipelineTransmit::TransportReady_s(TransportInfo &info) {
+nsresult
+MediaPipelineTransmit::TransportReady_s(TransportInfo& info)
+{
   ASSERT_ON_THREAD(sts_thread_);
   // Call base ready function.
   MediaPipeline::TransportReady_s(info);
 
   // The active flag is only set once the RTP transport (not RTCP) is ready.
   if (&info == &rtp_) {
     listener_->SetActive(true);
   }
 
   return NS_OK;
 }
 
-nsresult MediaPipelineTransmit::ReplaceTrack(RefPtr<MediaStreamTrack>& domtrack) {
+nsresult
+MediaPipelineTransmit::ReplaceTrack(RefPtr<MediaStreamTrack>& domtrack)
+{
   // MainThread, checked in calls we make
   if (domtrack) {
     nsString nsTrackId;
     domtrack->GetId(nsTrackId);
     std::string track_id(NS_ConvertUTF16toUTF8(nsTrackId).get());
-    CSFLogDebug(LOGTAG, "Reattaching pipeline %s to track %p track %s conduit type: %s",
-                description_.c_str(), &domtrack, track_id.c_str(),
-                (conduit_->type() == MediaSessionConduit::AUDIO ?"audio":"video"));
+    CSFLogDebug(
+      LOGTAG,
+      "Reattaching pipeline %s to track %p track %s conduit type: %s",
+      description_.c_str(),
+      &domtrack,
+      track_id.c_str(),
+      (conduit_->type() == MediaSessionConduit::AUDIO ? "audio" : "video"));
   }
 
   RefPtr<dom::MediaStreamTrack> oldTrack = domtrack_;
   bool wasTransmitting = oldTrack && transmitting_;
   Stop();
   domtrack_ = domtrack;
   SetDescription();
 
@@ -1604,77 +1737,86 @@ nsresult MediaPipelineTransmit::ReplaceT
   }
 
   if (wasTransmitting) {
     Start();
   }
   return NS_OK;
 }
 
-nsresult MediaPipeline::ConnectTransport_s(TransportInfo &info) {
+nsresult
+MediaPipeline::ConnectTransport_s(TransportInfo& info)
+{
   MOZ_ASSERT(info.transport_);
   ASSERT_ON_THREAD(sts_thread_);
 
   // Look to see if the transport is ready
   if (info.transport_->state() == TransportLayer::TS_OPEN) {
     nsresult res = TransportReady_s(info);
     if (NS_FAILED(res)) {
-      CSFLogError(LOGTAG, "Error calling TransportReady(); res=%u in %s",
-                  static_cast<uint32_t>(res), __FUNCTION__);
+      CSFLogError(LOGTAG,
+                  "Error calling TransportReady(); res=%u in %s",
+                  static_cast<uint32_t>(res),
+                  __FUNCTION__);
       return res;
     }
   } else if (info.transport_->state() == TransportLayer::TS_ERROR) {
-    CSFLogError(LOGTAG, "%s transport is already in error state",
-                ToString(info.type_));
+    CSFLogError(
+      LOGTAG, "%s transport is already in error state", ToString(info.type_));
     TransportFailed_s(info);
     return NS_ERROR_FAILURE;
   }
 
-  info.transport_->SignalStateChange.connect(this,
-                                             &MediaPipeline::StateChange);
+  info.transport_->SignalStateChange.connect(this, &MediaPipeline::StateChange);
 
   return NS_OK;
 }
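+// ConnectTransport_s() covers three cases: the flow is already TS_OPEN
+// (finish setup via TransportReady_s right away), already TS_ERROR (fail the
+// pipeline immediately), or still connecting (subscribe to SignalStateChange
+// and finish later in StateChange()).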
 
-MediaPipeline::TransportInfo* MediaPipeline::GetTransportInfo_s(
-    TransportFlow *flow) {
+MediaPipeline::TransportInfo*
+MediaPipeline::GetTransportInfo_s(TransportFlow* flow)
+{
   ASSERT_ON_THREAD(sts_thread_);
   if (flow == rtp_.transport_) {
     return &rtp_;
   }
 
   if (flow == rtcp_.transport_) {
     return &rtcp_;
   }
 
   return nullptr;
 }
 
-nsresult MediaPipeline::PipelineTransport::SendRtpPacket(
-    const uint8_t* data, size_t len) {
+nsresult
+MediaPipeline::PipelineTransport::SendRtpPacket(const uint8_t* data, size_t len)
+{
 
-  nsAutoPtr<DataBuffer> buf(new DataBuffer(data, len, len + SRTP_MAX_EXPANSION));
+  nsAutoPtr<DataBuffer> buf(
+    new DataBuffer(data, len, len + SRTP_MAX_EXPANSION));
 
-  RUN_ON_THREAD(sts_thread_,
-                WrapRunnable(
-                             RefPtr<MediaPipeline::PipelineTransport>(this),
-                             &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
-                             buf, true),
-                NS_DISPATCH_NORMAL);
+  RUN_ON_THREAD(
+    sts_thread_,
+    WrapRunnable(RefPtr<MediaPipeline::PipelineTransport>(this),
+                 &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
+                 buf,
+                 true),
+    NS_DISPATCH_NORMAL);
 
   return NS_OK;
 }
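+// SendRtpPacket() and SendRtcpPacket() share one pattern: copy the payload
+// once into a DataBuffer sized for in-place SRTP expansion, then hop to the
+// STS thread where SendRtpRtcpPacket_s() does the actual protection and
+// send; its bool argument selects RTP (true) vs RTCP (false).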
 
-nsresult MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s(
-    nsAutoPtr<DataBuffer> data,
-    bool is_rtp) {
+nsresult
+MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s(
+  nsAutoPtr<DataBuffer> data,
+  bool is_rtp)
+{
 
   ASSERT_ON_THREAD(sts_thread_);
   if (!pipeline_) {
-    return NS_OK;  // Detached
+    return NS_OK; // Detached
   }
   TransportInfo& transport = is_rtp ? pipeline_->rtp_ : pipeline_->rtcp_;
 
   if (!transport.send_srtp_) {
     CSFLogDebug(LOGTAG, "Couldn't write RTP/RTCP packet; SRTP not set up yet");
     return NS_OK;
   }
 
@@ -1684,115 +1826,145 @@ nsresult MediaPipeline::PipelineTranspor
   // libsrtp enciphers in place, so we need a big enough buffer.
   MOZ_ASSERT(data->capacity() >= data->len() + SRTP_MAX_EXPANSION);
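+  // For example (illustrative numbers only): a 1200-byte packet arrives here
+  // in a DataBuffer created with capacity 1200 + SRTP_MAX_EXPANSION, so the
+  // Protect* calls below can append the SRTP auth tag without reallocating.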
 
   if (RtpLogger::IsPacketLoggingOn()) {
     int header_len = 12;
     webrtc::RTPHeader header;
     if (pipeline_->rtp_parser_ &&
         pipeline_->rtp_parser_->Parse(data->data(), data->len(), &header)) {
-        header_len = header.headerLength;
+      header_len = header.headerLength;
     }
-    RtpLogger::LogPacket(data->data(), data->len(), false, is_rtp, header_len,
+    RtpLogger::LogPacket(data->data(),
+                         data->len(),
+                         false,
+                         is_rtp,
+                         header_len,
                          pipeline_->description_);
   }
 
   int out_len;
   nsresult res;
   if (is_rtp) {
-    pipeline_->packet_dumper_->Dump(
-        pipeline_->level(), dom::mozPacketDumpType::Rtp, true, data->data(), data->len());
+    pipeline_->packet_dumper_->Dump(pipeline_->level(),
+                                    dom::mozPacketDumpType::Rtp,
+                                    true,
+                                    data->data(),
+                                    data->len());
 
-    res = transport.send_srtp_->ProtectRtp(data->data(),
-                                           data->len(),
-                                           data->capacity(),
-                                           &out_len);
+    res = transport.send_srtp_->ProtectRtp(
+      data->data(), data->len(), data->capacity(), &out_len);
   } else {
-    pipeline_->packet_dumper_->Dump(
-        pipeline_->level(), dom::mozPacketDumpType::Rtcp, true, data->data(), data->len());
+    pipeline_->packet_dumper_->Dump(pipeline_->level(),
+                                    dom::mozPacketDumpType::Rtcp,
+                                    true,
+                                    data->data(),
+                                    data->len());
 
-    res = transport.send_srtp_->ProtectRtcp(data->data(),
-                                            data->len(),
-                                            data->capacity(),
-                                            &out_len);
+    res = transport.send_srtp_->ProtectRtcp(
+      data->data(), data->len(), data->capacity(), &out_len);
   }
   if (!NS_SUCCEEDED(res)) {
     return res;
   }
 
   // paranoia; don't have uninitialized bytes included in data->len()
   data->SetLength(out_len);
 
-  CSFLogDebug(LOGTAG, "%s sending %s packet", pipeline_->description_.c_str(),
+  CSFLogDebug(LOGTAG,
+              "%s sending %s packet",
+              pipeline_->description_.c_str(),
               (is_rtp ? "RTP" : "RTCP"));
   if (is_rtp) {
-    pipeline_->packet_dumper_->Dump(
-        pipeline_->level(), dom::mozPacketDumpType::Srtp, true, data->data(), out_len);
+    pipeline_->packet_dumper_->Dump(pipeline_->level(),
+                                    dom::mozPacketDumpType::Srtp,
+                                    true,
+                                    data->data(),
+                                    out_len);
 
     pipeline_->increment_rtp_packets_sent(out_len);
   } else {
-    pipeline_->packet_dumper_->Dump(
-        pipeline_->level(), dom::mozPacketDumpType::Srtcp, true, data->data(), out_len);
+    pipeline_->packet_dumper_->Dump(pipeline_->level(),
+                                    dom::mozPacketDumpType::Srtcp,
+                                    true,
+                                    data->data(),
+                                    out_len);
 
     pipeline_->increment_rtcp_packets_sent();
   }
   return pipeline_->SendPacket(transport.transport_, data->data(), out_len);
 }
 
-nsresult MediaPipeline::PipelineTransport::SendRtcpPacket(
-    const uint8_t* data, size_t len) {
+nsresult
+MediaPipeline::PipelineTransport::SendRtcpPacket(const uint8_t* data,
+                                                 size_t len)
+{
 
-  nsAutoPtr<DataBuffer> buf(new DataBuffer(data, len, len + SRTP_MAX_EXPANSION));
+  nsAutoPtr<DataBuffer> buf(
+    new DataBuffer(data, len, len + SRTP_MAX_EXPANSION));
 
-  RUN_ON_THREAD(sts_thread_,
-                WrapRunnable(
-                             RefPtr<MediaPipeline::PipelineTransport>(this),
-                             &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
-                             buf, false),
-                NS_DISPATCH_NORMAL);
+  RUN_ON_THREAD(
+    sts_thread_,
+    WrapRunnable(RefPtr<MediaPipeline::PipelineTransport>(this),
+                 &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
+                 buf,
+                 false),
+    NS_DISPATCH_NORMAL);
 
   return NS_OK;
 }
 
-void MediaPipelineTransmit::PipelineListener::
-UnsetTrackId(MediaStreamGraphImpl* graph) {
-  class Message : public ControlMessage {
+void
+MediaPipelineTransmit::PipelineListener::UnsetTrackId(
+  MediaStreamGraphImpl* graph)
+{
+  class Message : public ControlMessage
+  {
   public:
-    explicit Message(PipelineListener* listener) :
-      ControlMessage(nullptr), listener_(listener) {}
-    virtual void Run() override
+    explicit Message(PipelineListener* listener)
+      : ControlMessage(nullptr)
+      , listener_(listener)
     {
-      listener_->UnsetTrackIdImpl();
     }
+    virtual void Run() override { listener_->UnsetTrackIdImpl(); }
     RefPtr<PipelineListener> listener_;
   };
   graph->AppendMessage(MakeUnique<Message>(this));
 }
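+// The ControlMessage above is the usual pattern for mutating listener state
+// from the right thread: AppendMessage() queues it on the MediaStreamGraph,
+// and Run() executes UnsetTrackIdImpl() during graph processing.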
 // Called if we're attached with AddDirectListener()
-void MediaPipelineTransmit::PipelineListener::
-NotifyRealtimeTrackData(MediaStreamGraph* graph,
-                        StreamTime offset,
-                        const MediaSegment& media) {
-  CSFLogDebug(LOGTAG, "MediaPipeline::NotifyRealtimeTrackData() listener=%p, offset=%" PRId64 ", duration=%" PRId64,
-              this, offset, media.GetDuration());
+void
+MediaPipelineTransmit::PipelineListener::NotifyRealtimeTrackData(
+  MediaStreamGraph* graph,
+  StreamTime offset,
+  const MediaSegment& media)
+{
+  CSFLogDebug(
+    LOGTAG,
+    "MediaPipeline::NotifyRealtimeTrackData() listener=%p, offset=%" PRId64
+    ", duration=%" PRId64,
+    this,
+    offset,
+    media.GetDuration());
 
   if (media.GetType() == MediaSegment::VIDEO) {
     // We have to call the upstream NotifyRealtimeTrackData and
     // MediaStreamVideoSink will route them to SetCurrentFrames.
     MediaStreamVideoSink::NotifyRealtimeTrackData(graph, offset, media);
     return;
   }
 
   NewData(media, graph->GraphRate());
 }
 
-void MediaPipelineTransmit::PipelineListener::
-NotifyQueuedChanges(MediaStreamGraph* graph,
-                    StreamTime offset,
-                    const MediaSegment& queued_media) {
+void
+MediaPipelineTransmit::PipelineListener::NotifyQueuedChanges(
+  MediaStreamGraph* graph,
+  StreamTime offset,
+  const MediaSegment& queued_media)
+{
   CSFLogDebug(LOGTAG, "MediaPipeline::NotifyQueuedChanges()");
 
   if (queued_media.GetType() == MediaSegment::VIDEO) {
     // We always get video from SetCurrentFrames().
     return;
   }
 
   if (direct_connect_) {
@@ -1805,105 +1977,124 @@ NotifyQueuedChanges(MediaStreamGraph* gr
     rate = graph->GraphRate();
   } else {
     // When running tests, graph may be null. In that case use a default.
     rate = 16000;
   }
   NewData(queued_media, rate);
 }
 
-void MediaPipelineTransmit::PipelineListener::
-NotifyDirectListenerInstalled(InstallationResult aResult) {
-  CSFLogInfo(LOGTAG, "MediaPipeline::NotifyDirectListenerInstalled() listener=%p, result=%d",
-             this, static_cast<int32_t>(aResult));
+void
+MediaPipelineTransmit::PipelineListener::NotifyDirectListenerInstalled(
+  InstallationResult aResult)
+{
+  CSFLogInfo(
+    LOGTAG,
+    "MediaPipeline::NotifyDirectListenerInstalled() listener=%p, result=%d",
+    this,
+    static_cast<int32_t>(aResult));
 
   direct_connect_ = InstallationResult::SUCCESS == aResult;
 }
 
-void MediaPipelineTransmit::PipelineListener::
-NotifyDirectListenerUninstalled() {
-  CSFLogInfo(LOGTAG, "MediaPipeline::NotifyDirectListenerUninstalled() listener=%p", this);
+void
+MediaPipelineTransmit::PipelineListener::NotifyDirectListenerUninstalled()
+{
+  CSFLogInfo(LOGTAG,
+             "MediaPipeline::NotifyDirectListenerUninstalled() listener=%p",
+             this);
 
   direct_connect_ = false;
 }
 
-void MediaPipelineTransmit::PipelineListener::
-NewData(const MediaSegment& media, TrackRate aRate /* = 0 */) {
+void
+MediaPipelineTransmit::PipelineListener::NewData(const MediaSegment& media,
+                                                 TrackRate aRate /* = 0 */)
+{
   if (!active_) {
     CSFLogDebug(LOGTAG, "Discarding packets because transport not ready");
     return;
   }
 
-  if (conduit_->type() !=
-      (media.GetType() == MediaSegment::AUDIO ? MediaSessionConduit::AUDIO :
-                                                MediaSessionConduit::VIDEO)) {
-    MOZ_ASSERT(false, "The media type should always be correct since the "
-                      "listener is locked to a specific track");
+  if (conduit_->type() != (media.GetType() == MediaSegment::AUDIO
+                             ? MediaSessionConduit::AUDIO
+                             : MediaSessionConduit::VIDEO)) {
+    MOZ_ASSERT(false,
+               "The media type should always be correct since the "
+               "listener is locked to a specific track");
     return;
   }
 
   // TODO(ekr@rtfm.com): For now assume that we have only one
   // track type and it's destined for us
   // See bug 784517
   if (media.GetType() == MediaSegment::AUDIO) {
     MOZ_RELEASE_ASSERT(aRate > 0);
 
-    AudioSegment* audio = const_cast<AudioSegment *>(static_cast<const AudioSegment*>(&media));
-    for(AudioSegment::ChunkIterator iter(*audio); !iter.IsEnded(); iter.Next()) {
+    AudioSegment* audio =
+      const_cast<AudioSegment*>(static_cast<const AudioSegment*>(&media));
+    for (AudioSegment::ChunkIterator iter(*audio); !iter.IsEnded();
+         iter.Next()) {
       audio_processing_->QueueAudioChunk(aRate, *iter, enabled_);
     }
   } else {
-    VideoSegment* video = const_cast<VideoSegment *>(static_cast<const VideoSegment*>(&media));
+    VideoSegment* video =
+      const_cast<VideoSegment*>(static_cast<const VideoSegment*>(&media));
-    VideoSegment::ChunkIterator iter(*video);
-    for(VideoSegment::ChunkIterator iter(*video); !iter.IsEnded(); iter.Next()) {
+    for (VideoSegment::ChunkIterator iter(*video); !iter.IsEnded();
+         iter.Next()) {
       converter_->QueueVideoChunk(*iter, !enabled_);
     }
   }
 }
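+// NewData() fans out by segment type: audio chunks are queued on the
+// AudioProxyThread (audio_processing_) and video chunks on the
+// VideoFrameConverter (converter_); enabled_ is forwarded so a disabled
+// track can be rendered downstream as silence or black frames.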
 
-void MediaPipelineTransmit::PipelineListener::
-SetCurrentFrames(const VideoSegment& aSegment)
+void
+MediaPipelineTransmit::PipelineListener::SetCurrentFrames(
+  const VideoSegment& aSegment)
 {
   NewData(aSegment);
 }
 
-class TrackAddedCallback {
- public:
+class TrackAddedCallback
+{
+public:
   virtual void TrackAdded(TrackTicks current_ticks) = 0;
 
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TrackAddedCallback);
 
- protected:
+protected:
   virtual ~TrackAddedCallback() {}
 };
 
 class GenericReceiveListener;
 
 class GenericReceiveCallback : public TrackAddedCallback
 {
- public:
+public:
   explicit GenericReceiveCallback(GenericReceiveListener* listener)
-    : listener_(listener) {}
+    : listener_(listener)
+  {
+  }
 
   void TrackAdded(TrackTicks time);
 
- private:
+private:
   RefPtr<GenericReceiveListener> listener_;
 };
 
 class GenericReceiveListener : public MediaStreamListener
 {
- public:
+public:
   explicit GenericReceiveListener(dom::MediaStreamTrack* track)
-    : track_(track),
-      played_ticks_(0),
-      last_log_(0),
-      principal_handle_(PRINCIPAL_HANDLE_NONE),
-      listening_(false),
-      maybe_track_needs_unmute_(true)
+    : track_(track)
+    , played_ticks_(0)
+    , last_log_(0)
+    , principal_handle_(PRINCIPAL_HANDLE_NONE)
+    , listening_(false)
+    , maybe_track_needs_unmute_(true)
   {
     MOZ_ASSERT(track->GetInputStream()->AsSourceStream());
   }
 
   virtual ~GenericReceiveListener()
   {
     NS_ReleaseOnMainThreadSystemGroup(
       "GenericReceiveListener::track_", track_.forget());
@@ -1948,23 +2139,22 @@ class GenericReceiveListener : public Me
   {
     CSFLogDebug(LOGTAG, "GenericReceiveListener ending track");
 
     // We do this on MSG to avoid it racing against StartTrack.
     class Message : public ControlMessage
     {
     public:
       explicit Message(dom::MediaStreamTrack* track)
-        : ControlMessage(track->GetInputStream()),
-          track_id_(track->GetInputTrackId())
-      {}
+        : ControlMessage(track->GetInputStream())
+        , track_id_(track->GetInputTrackId())
+      {
+      }
 
-      void Run() override {
-        mStream->AsSourceStream()->EndTrack(track_id_);
-      }
+      void Run() override { mStream->AsSourceStream()->EndTrack(track_id_); }
 
       const TrackID track_id_;
     };
 
     track_->GraphImpl()->AppendMessage(MakeUnique<Message>(track_));
     // This breaks the cycle with the SourceMediaStream
     track_->GetInputStream()->RemoveListener(this);
   }
@@ -1972,79 +2162,79 @@ class GenericReceiveListener : public Me
   // Must be called on the main thread
   void SetPrincipalHandle_m(const PrincipalHandle& principal_handle)
   {
     class Message : public ControlMessage
     {
     public:
       Message(GenericReceiveListener* listener,
               const PrincipalHandle& principal_handle)
-        : ControlMessage(nullptr),
-          listener_(listener),
-          principal_handle_(principal_handle)
-      {}
+        : ControlMessage(nullptr)
+        , listener_(listener)
+        , principal_handle_(principal_handle)
+      {
+      }
 
-      void Run() override {
+      void Run() override
+      {
         listener_->SetPrincipalHandle_msg(principal_handle_);
       }
 
       RefPtr<GenericReceiveListener> listener_;
       PrincipalHandle principal_handle_;
     };
 
-    track_->GraphImpl()->AppendMessage(MakeUnique<Message>(this, principal_handle));
+    track_->GraphImpl()->AppendMessage(
+      MakeUnique<Message>(this, principal_handle));
   }
 
   // Must be called on the MediaStreamGraph thread
   void SetPrincipalHandle_msg(const PrincipalHandle& principal_handle)
   {
     principal_handle_ = principal_handle;
   }
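+  // Note the split above: SetPrincipalHandle_m() (main thread) appends a
+  // ControlMessage, so the actual write in SetPrincipalHandle_msg() always
+  // happens on the MSG thread and principal_handle_ needs no lock here.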
 
- protected:
+protected:
   RefPtr<dom::MediaStreamTrack> track_;
   TrackTicks played_ticks_;
   TrackTicks last_log_; // played_ticks_ when we last logged
   PrincipalHandle principal_handle_;
   bool listening_;
   Atomic<bool> maybe_track_needs_unmute_;
 };
 
-MediaPipelineReceive::MediaPipelineReceive(
-    const std::string& pc,
-    nsCOMPtr<nsIEventTarget> main_thread,
-    nsCOMPtr<nsIEventTarget> sts_thread,
-    RefPtr<MediaSessionConduit> conduit) :
-  MediaPipeline(pc, RECEIVE, main_thread, sts_thread, conduit),
-  segments_added_(0)
+MediaPipelineReceive::MediaPipelineReceive(const std::string& pc,
+                                           nsCOMPtr<nsIEventTarget> main_thread,
+                                           nsCOMPtr<nsIEventTarget> sts_thread,
+                                           RefPtr<MediaSessionConduit> conduit)
+  : MediaPipeline(pc, RECEIVE, main_thread, sts_thread, conduit)
+  , segments_added_(0)
 {
 }
 
-MediaPipelineReceive::~MediaPipelineReceive()
-{
-}
+MediaPipelineReceive::~MediaPipelineReceive() {}
 
 class MediaPipelineReceiveAudio::PipelineListener
   : public GenericReceiveListener
 {
 public:
   PipelineListener(dom::MediaStreamTrack* track,
                    const RefPtr<MediaSessionConduit>& conduit)
-    : GenericReceiveListener(track),
-      conduit_(conduit)
+    : GenericReceiveListener(track)
+    , conduit_(conduit)
   {
   }
 
   ~PipelineListener()
   {
     if (!NS_IsMainThread()) {
       // release conduit on the main thread. Must use forget()!
-      nsresult rv = NS_DispatchToMainThread(new
-                                            ConduitDeleteEvent(conduit_.forget()));
-      MOZ_ASSERT(!NS_FAILED(rv),"Could not dispatch conduit shutdown to main");
+      nsresult rv =
+        NS_DispatchToMainThread(new ConduitDeleteEvent(conduit_.forget()));
+      MOZ_ASSERT(!NS_FAILED(rv), "Could not dispatch conduit shutdown to main");
       if (NS_FAILED(rv)) {
         MOZ_CRASH();
       }
     } else {
       conduit_ = nullptr;
     }
   }
 
@@ -2066,75 +2256,84 @@ public:
     while (source->TicksToTimeRoundDown(rate,
                                         played_ticks_) < desired_time) {
       int16_t scratch_buffer[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
 
       int samples_length;
 
       // This fetches 10ms of data, either mono or stereo
       MediaConduitErrorCode err =
-          static_cast<AudioSessionConduit*>(conduit_.get())->GetAudioFrame(
-              scratch_buffer,
-              rate,
-              0,  // TODO(ekr@rtfm.com): better estimate of "capture" (really playout) delay
-              samples_length);
+        static_cast<AudioSessionConduit*>(conduit_.get())
+          ->GetAudioFrame(scratch_buffer,
+                          rate,
+                          0, // TODO(ekr@rtfm.com): better estimate of "capture"
+                             // (really playout) delay
+                          samples_length);
 
       if (err != kMediaConduitNoError) {
         // Insert silence on conduit/GIPS failure (extremely unlikely)
-        CSFLogError(LOGTAG, "Audio conduit failed (%d) to return data @ %" PRId64 " (desired %" PRId64 " -> %f)",
-                    err, played_ticks_, desired_time,
+        CSFLogError(LOGTAG,
+                    "Audio conduit failed (%d) to return data @ %" PRId64
+                    " (desired %" PRId64 " -> %f)",
+                    err,
+                    played_ticks_,
+                    desired_time,
                     source->StreamTimeToSeconds(desired_time));
         // if this is not enough we'll loop and provide more
         samples_length = samples_per_10ms;
         PodArrayZero(scratch_buffer);
       }
 
-      MOZ_ASSERT(samples_length * sizeof(uint16_t) <= AUDIO_SAMPLE_BUFFER_MAX_BYTES);
+      MOZ_ASSERT(samples_length * sizeof(uint16_t) <=
+                 AUDIO_SAMPLE_BUFFER_MAX_BYTES);
 
-      CSFLogDebug(LOGTAG, "Audio conduit returned buffer of length %u",
-                  samples_length);
+      CSFLogDebug(
+        LOGTAG, "Audio conduit returned buffer of length %u", samples_length);
 
-      RefPtr<SharedBuffer> samples = SharedBuffer::Create(samples_length * sizeof(uint16_t));
-      int16_t *samples_data = static_cast<int16_t *>(samples->Data());
+      RefPtr<SharedBuffer> samples =
+        SharedBuffer::Create(samples_length * sizeof(uint16_t));
+      int16_t* samples_data = static_cast<int16_t*>(samples->Data());
       AudioSegment segment;
-      // We derive the number of channels of the stream from the number of samples
-      // the AudioConduit gives us, considering it gives us packets of 10ms and we
-      // know the rate.
+      // We derive the number of channels of the stream from the number of
+      // samples the AudioConduit gives us, considering it gives us packets of
+      // 10ms and we know the rate.
       uint32_t channelCount = samples_length / samples_per_10ms;
       AutoTArray<int16_t*,2> channels;
       AutoTArray<const int16_t*,2> outputChannels;
       size_t frames = samples_length / channelCount;
 
       channels.SetLength(channelCount);
 
       size_t offset = 0;
       for (size_t i = 0; i < channelCount; i++) {
         channels[i] = samples_data + offset;
         offset += frames;
       }
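+      // Worked example (assuming rate = 48000): samples_per_10ms is 480, so
+      // a stereo conduit returning samples_length = 960 gives
+      // channelCount = 2 and frames = 480; channels[0] then points at
+      // samples_data and channels[1] at samples_data + 480.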
 
-      DeinterleaveAndConvertBuffer(scratch_buffer,
-                                   frames,
-                                   channelCount,
-                                   channels.Elements());
+      DeinterleaveAndConvertBuffer(
+        scratch_buffer, frames, channelCount, channels.Elements());
 
       outputChannels.AppendElements(channels);
 
-      segment.AppendFrames(samples.forget(), outputChannels, frames,
-                           principal_handle_);
+      segment.AppendFrames(
+        samples.forget(), outputChannels, frames, principal_handle_);
 
       // Handle track not actually added yet or removed/finished
       if (source->AppendToTrack(track_->GetInputTrackId(), &segment)) {
         played_ticks_ += frames;
         if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
           if (played_ticks_ > last_log_ + rate) { // ~ 1 second
-            MOZ_LOG(AudioLogModule(), LogLevel::Debug,
-                    ("%p: Inserting %zu samples into track %d, total = %" PRIu64,
-                     (void*) this, frames, track_->GetInputTrackId(),
-                     played_ticks_));
+            MOZ_LOG(
+              AudioLogModule(),
+              LogLevel::Debug,
+              ("%p: Inserting %zu samples into track %d, total = %" PRIu64,
+               (void*)this,
+               frames,
+               track_->GetInputTrackId(),
+               played_ticks_));
             last_log_ = played_ticks_;
           }
         }
       } else {
         CSFLogError(LOGTAG, "AppendToTrack failed");
         // we can't un-read the data, but that's ok since we don't want to
        // buffer - but don't infinite-loop!
         return;
@@ -2142,37 +2341,40 @@ public:
     }
   }
 
 private:
   RefPtr<MediaSessionConduit> conduit_;
 };
 
 MediaPipelineReceiveAudio::MediaPipelineReceiveAudio(
-    const std::string& pc,
-    nsCOMPtr<nsIEventTarget> main_thread,
-    nsCOMPtr<nsIEventTarget> sts_thread,
-    RefPtr<AudioSessionConduit> conduit,
-    dom::MediaStreamTrack* aTrack) :
-  MediaPipelineReceive(pc, main_thread, sts_thread, conduit),
-  listener_(aTrack ? new PipelineListener(aTrack, conduit_) : nullptr)
+  const std::string& pc,
+  nsCOMPtr<nsIEventTarget> main_thread,
+  nsCOMPtr<nsIEventTarget> sts_thread,
+  RefPtr<AudioSessionConduit> conduit,
+  dom::MediaStreamTrack* aTrack)
+  : MediaPipelineReceive(pc, main_thread, sts_thread, conduit)
+  , listener_(aTrack ? new PipelineListener(aTrack, conduit_) : nullptr)
 {
   description_ = pc_ + "| Receive audio";
 }
 
-void MediaPipelineReceiveAudio::DetachMedia()
+void
+MediaPipelineReceiveAudio::DetachMedia()
 {
   ASSERT_ON_THREAD(main_thread_);
   if (listener_) {
     listener_->EndTrack();
     listener_ = nullptr;
   }
 }
 
-void MediaPipelineReceiveAudio::SetPrincipalHandle_m(const PrincipalHandle& principal_handle)
+void
+MediaPipelineReceiveAudio::SetPrincipalHandle_m(
+  const PrincipalHandle& principal_handle)
 {
   if (listener_) {
     listener_->SetPrincipalHandle_m(principal_handle);
   }
 }
 
 void
 MediaPipelineReceiveAudio::Start()
@@ -2196,17 +2398,18 @@ void
 MediaPipelineReceiveAudio::OnRtpPacketReceived()
 {
   if (listener_) {
     listener_->OnRtpReceived();
   }
 }
 
 class MediaPipelineReceiveVideo::PipelineListener
-  : public GenericReceiveListener {
+  : public GenericReceiveListener
+{
 public:
   explicit PipelineListener(dom::MediaStreamTrack* track)
     : GenericReceiveListener(track)
     , image_container_()
     , image_()
     , mutex_("Video PipelineListener")
   {
     image_container_ =
@@ -2237,17 +2440,18 @@ public:
         return;
       }
     }
   }
 
   // Accessors for external writes from the renderer
   void FrameSizeChange(unsigned int width,
                        unsigned int height,
-                       unsigned int number_of_streams) {
+                       unsigned int number_of_streams)
+  {
     MutexAutoLock enter(mutex_);
 
     width_ = width;
     height_ = height;
   }
 
   void RenderVideoFrame(const webrtc::VideoFrameBuffer& buffer,
                         uint32_t time_stamp,
@@ -2297,21 +2501,24 @@ private:
   RefPtr<layers::ImageContainer> image_container_;
   RefPtr<layers::Image> image_;
   Mutex mutex_; // Mutex for processing WebRTC frames.
                 // Protects image_ against:
                 // - Writing from the GIPS thread
                 // - Reading from the MSG thread
 };
 
-class MediaPipelineReceiveVideo::PipelineRenderer : public mozilla::VideoRenderer
+class MediaPipelineReceiveVideo::PipelineRenderer
+  : public mozilla::VideoRenderer
 {
 public:
-  explicit PipelineRenderer(MediaPipelineReceiveVideo *pipeline) :
-    pipeline_(pipeline) {}
+  explicit PipelineRenderer(MediaPipelineReceiveVideo* pipeline)
+    : pipeline_(pipeline)
+  {
+  }
 
   void Detach() { pipeline_ = nullptr; }
 
   // Implement VideoRenderer
   void FrameSizeChange(unsigned int width,
                        unsigned int height,
                        unsigned int number_of_streams) override
   {
@@ -2321,50 +2528,52 @@ public:
   void RenderVideoFrame(const webrtc::VideoFrameBuffer& buffer,
                         uint32_t time_stamp,
                         int64_t render_time) override
   {
     pipeline_->listener_->RenderVideoFrame(buffer, time_stamp, render_time);
   }
 
 private:
-  MediaPipelineReceiveVideo *pipeline_;  // Raw pointer to avoid cycles
+  MediaPipelineReceiveVideo* pipeline_; // Raw pointer to avoid cycles
 };
 
-
 MediaPipelineReceiveVideo::MediaPipelineReceiveVideo(
-    const std::string& pc,
-    nsCOMPtr<nsIEventTarget> main_thread,
-    nsCOMPtr<nsIEventTarget> sts_thread,
-    RefPtr<VideoSessionConduit> conduit,
-    dom::MediaStreamTrack* aTrack) :
-  MediaPipelineReceive(pc, main_thread, sts_thread, conduit),
-  renderer_(new PipelineRenderer(this)),
-  listener_(aTrack ? new PipelineListener(aTrack) : nullptr)
+  const std::string& pc,
+  nsCOMPtr<nsIEventTarget> main_thread,
+  nsCOMPtr<nsIEventTarget> sts_thread,
+  RefPtr<VideoSessionConduit> conduit,
+  dom::MediaStreamTrack* aTrack)
+  : MediaPipelineReceive(pc, main_thread, sts_thread, conduit)
+  , renderer_(new PipelineRenderer(this))
+  , listener_(aTrack ? new PipelineListener(aTrack) : nullptr)
 {
   description_ = pc_ + "| Receive video";
   conduit->AttachRenderer(renderer_);
 }
 
-void MediaPipelineReceiveVideo::DetachMedia()
+void
+MediaPipelineReceiveVideo::DetachMedia()
 {
   ASSERT_ON_THREAD(main_thread_);
 
   // stop generating video and thus stop invoking the PipelineRenderer
   // and PipelineListener - the renderer has a raw ptr to the Pipeline to
   // avoid cycles, and the render callbacks are invoked from a different
   // thread so simple null-checks would cause TSAN bugs without locks.
   static_cast<VideoSessionConduit*>(conduit_.get())->DetachRenderer();
   if (listener_) {
     listener_->EndTrack();
     listener_ = nullptr;
   }
 }
 
-void MediaPipelineReceiveVideo::SetPrincipalHandle_m(const PrincipalHandle& principal_handle)
+void
+MediaPipelineReceiveVideo::SetPrincipalHandle_m(
+  const PrincipalHandle& principal_handle)
 {
   if (listener_) {
     listener_->SetPrincipalHandle_m(principal_handle);
   }
 }
 
 void
 MediaPipelineReceiveVideo::Start()
@@ -2387,40 +2596,44 @@ MediaPipelineReceiveVideo::Stop()
 void
 MediaPipelineReceiveVideo::OnRtpPacketReceived()
 {
   if (listener_) {
     listener_->OnRtpReceived();
   }
 }
 
-DOMHighResTimeStamp MediaPipeline::GetNow() {
+DOMHighResTimeStamp
+MediaPipeline::GetNow()
+{
   return webrtc::Clock::GetRealTimeClock()->TimeInMilliseconds();
 }
 
 DOMHighResTimeStamp
-MediaPipeline::RtpCSRCStats::GetExpiryFromTime(
-    const DOMHighResTimeStamp aTime) {
+MediaPipeline::RtpCSRCStats::GetExpiryFromTime(const DOMHighResTimeStamp aTime)
+{
   // DOMHighResTimeStamp is measured in milliseconds
   return aTime - EXPIRY_TIME_MILLISECONDS;
 }
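+// E.g. with EXPIRY_TIME_MILLISECONDS of 10 * 1000 (see MediaPipeline.h),
+// GetExpiryFromTime(t) returns t - 10000; any RtpCSRCStats whose timestamp
+// is older than that is reported as expired by Expired().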
 
 MediaPipeline::RtpCSRCStats::RtpCSRCStats(const uint32_t aCsrc,
                                           const DOMHighResTimeStamp aTime)
   : mCsrc(aCsrc)
-  , mTimestamp(aTime) {}
+  , mTimestamp(aTime)
+{
+}
 
 void
 MediaPipeline::RtpCSRCStats::GetWebidlInstance(
-    dom::RTCRTPContributingSourceStats& aWebidlObj,
-    const nsString &aInboundRtpStreamId) const
+  dom::RTCRTPContributingSourceStats& aWebidlObj,
+  const nsString& aInboundRtpStreamId) const
 {
   nsString statId = NS_LITERAL_STRING("csrc_") + aInboundRtpStreamId;
   statId.AppendLiteral("_");
   statId.AppendInt(mCsrc);
   aWebidlObj.mId.Construct(statId);
   aWebidlObj.mType.Construct(RTCStatsType::Csrc);
   aWebidlObj.mTimestamp.Construct(mTimestamp);
   aWebidlObj.mContributorSsrc.Construct(mCsrc);
   aWebidlObj.mInboundRtpStreamId.Construct(aInboundRtpStreamId);
 }
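+// For example (hypothetical ids): aInboundRtpStreamId "inbound_rtp_audio_0"
+// with mCsrc 12345 produces the stat id "csrc_inbound_rtp_audio_0_12345".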
 
-}  // end namespace
+} // end namespace
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
@@ -33,18 +33,18 @@ class nsIPrincipal;
 
 namespace mozilla {
 class MediaPipelineFilter;
 class PeerIdentity;
 class AudioProxyThread;
 class VideoFrameConverter;
 
 namespace dom {
-  class MediaStreamTrack;
-  struct RTCRTPContributingSourceStats;
+class MediaStreamTrack;
+struct RTCRTPContributingSourceStats;
 } // namespace dom
 
 class SourceMediaStream;
 
 // A class that represents the pipeline of audio and video
 // The dataflow looks like:
 //
 // TRANSMIT
@@ -70,20 +70,30 @@ class SourceMediaStream;
 // One or another GIPS thread
 //   * Receives RTCP messages to send to the other side
 //   * Processes video frames GIPS wants to render
 //
 // For a transmitting conduit, "output" is RTP and "input" is RTCP.
 // For a receiving conduit, "input" is RTP and "output" is RTCP.
 //
 
-class MediaPipeline : public sigslot::has_slots<> {
- public:
-  enum Direction { TRANSMIT, RECEIVE };
-  enum State { MP_CONNECTING, MP_OPEN, MP_CLOSED };
+class MediaPipeline : public sigslot::has_slots<>
+{
+public:
+  enum Direction
+  {
+    TRANSMIT,
+    RECEIVE
+  };
+  enum State
+  {
+    MP_CONNECTING,
+    MP_OPEN,
+    MP_CLOSED
+  };
   MediaPipeline(const std::string& pc,
                 Direction direction,
                 nsCOMPtr<nsIEventTarget> main_thread,
                 nsCOMPtr<nsIEventTarget> sts_thread,
                 RefPtr<MediaSessionConduit> conduit);
 
   virtual void Start() = 0;
   virtual void Stop() = 0;
@@ -110,160 +120,160 @@ class MediaPipeline : public sigslot::ha
   // everything but the given RID
   void AddRIDFilter_m(const std::string& rid);
   void AddRIDFilter_s(const std::string& rid);
 
   virtual Direction direction() const { return direction_; }
   int level() const { return level_; }
   virtual bool IsVideo() const = 0;
 
-  bool IsDoingRtcpMux() const {
-    return (rtp_.type_ == MUX);
-  }
+  bool IsDoingRtcpMux() const { return (rtp_.type_ == MUX); }
 
-  class RtpCSRCStats {
+  class RtpCSRCStats
+  {
   public:
     // Gets an expiration time for CSRC info given a reference time,
     //   which would normally be the time of calling.
     //   This value can then be used to check whether an RtpCSRCStats
     //   has expired via Expired(...)
-    static DOMHighResTimeStamp
-    GetExpiryFromTime(const DOMHighResTimeStamp aTime);
+    static DOMHighResTimeStamp GetExpiryFromTime(
+      const DOMHighResTimeStamp aTime);
 
-    RtpCSRCStats(const uint32_t aCsrc,
-                 const DOMHighResTimeStamp aTime);
-    ~RtpCSRCStats() {};
+    RtpCSRCStats(const uint32_t aCsrc, const DOMHighResTimeStamp aTime);
+    ~RtpCSRCStats(){};
     // Initialize a webidl representation suitable for adding to a report.
     //   This assumes that the webidl object is empty.
     // @param aWebidlObj the webidl binding object to populate
     // @param aInboundRtpStreamId the associated RTCInboundRTPStreamStats.id
-    void
-    GetWebidlInstance(dom::RTCRTPContributingSourceStats& aWebidlObj,
-                             const nsString &aInboundRtpStreamId) const;
+    void GetWebidlInstance(dom::RTCRTPContributingSourceStats& aWebidlObj,
+                           const nsString& aInboundRtpStreamId) const;
     void SetTimestamp(const DOMHighResTimeStamp aTime) { mTimestamp = aTime; }
     // Check whether the RtpCSRCStats has expired against a
     //   given expiration time.
-    bool Expired(const DOMHighResTimeStamp aExpiry) const {
+    bool Expired(const DOMHighResTimeStamp aExpiry) const
+    {
       return mTimestamp < aExpiry;
     }
+
   private:
     static const double constexpr EXPIRY_TIME_MILLISECONDS = 10 * 1000;
     uint32_t mCsrc;
     DOMHighResTimeStamp mTimestamp;
   };
 
   // Gets the gathered contributing source stats for the last expiration period.
   // @param aInboundStreamId the stream id used to populate the
   //   inboundRtpStreamId field
   // @param aArr the array to append the stats objects to
-  void
-  GetContributingSourceStats(
-      const nsString& aInboundStreamId,
-      FallibleTArray<dom::RTCRTPContributingSourceStats>& aArr) const;
+  void GetContributingSourceStats(
+    const nsString& aInboundStreamId,
+    FallibleTArray<dom::RTCRTPContributingSourceStats>& aArr) const;
 
   int32_t rtp_packets_sent() const { return rtp_packets_sent_; }
   int64_t rtp_bytes_sent() const { return rtp_bytes_sent_; }
   int32_t rtcp_packets_sent() const { return rtcp_packets_sent_; }
   int32_t rtp_packets_received() const { return rtp_packets_received_; }
   int64_t rtp_bytes_received() const { return rtp_bytes_received_; }
   int32_t rtcp_packets_received() const { return rtcp_packets_received_; }
 
-  MediaSessionConduit *Conduit() const { return conduit_; }
+  MediaSessionConduit* Conduit() const { return conduit_; }
 
   // Thread-safe ref counting
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaPipeline)
 
-  typedef enum {
-    RTP,
-    RTCP,
-    MUX,
-    MAX_RTP_TYPE
-  } RtpType;
+  typedef enum { RTP, RTCP, MUX, MAX_RTP_TYPE } RtpType;
 
   // Separate class to allow ref counting
-  class PipelineTransport : public TransportInterface {
-   public:
+  class PipelineTransport : public TransportInterface
+  {
+  public:
     // Implement the TransportInterface functions
-    explicit PipelineTransport(MediaPipeline *pipeline)
-        : pipeline_(pipeline),
-          sts_thread_(pipeline->sts_thread_) {}
+    explicit PipelineTransport(MediaPipeline* pipeline)
+      : pipeline_(pipeline)
+      , sts_thread_(pipeline->sts_thread_)
+    {
+    }
 
-    void Attach(MediaPipeline *pipeline) { pipeline_ = pipeline; }
+    void Attach(MediaPipeline* pipeline) { pipeline_ = pipeline; }
     void Detach() { pipeline_ = nullptr; }
-    MediaPipeline *pipeline() const { return pipeline_; }
+    MediaPipeline* pipeline() const { return pipeline_; }
 
     virtual nsresult SendRtpPacket(const uint8_t* data, size_t len);
     virtual nsresult SendRtcpPacket(const uint8_t* data, size_t len);
 
-   private:
-    nsresult SendRtpRtcpPacket_s(nsAutoPtr<DataBuffer> data,
-                                 bool is_rtp);
+  private:
+    nsresult SendRtpRtcpPacket_s(nsAutoPtr<DataBuffer> data, bool is_rtp);
 
     // Creates a cycle, which we break with Detach
     RefPtr<MediaPipeline> pipeline_;
     nsCOMPtr<nsIEventTarget> sts_thread_;
   };
 
- protected:
+protected:
   virtual ~MediaPipeline();
   nsresult AttachTransport_s();
   friend class PipelineTransport;
 
-  class TransportInfo {
-    public:
-      TransportInfo(RefPtr<TransportFlow> flow, RtpType type) :
-        transport_(flow),
-        state_(MP_CONNECTING),
-        type_(type) {
-      }
+  class TransportInfo
+  {
+  public:
+    TransportInfo(RefPtr<TransportFlow> flow, RtpType type)
+      : transport_(flow)
+      , state_(MP_CONNECTING)
+      , type_(type)
+    {
+    }
 
-      void Detach()
-      {
-        transport_ = nullptr;
-        send_srtp_ = nullptr;
-        recv_srtp_ = nullptr;
-      }
+    void Detach()
+    {
+      transport_ = nullptr;
+      send_srtp_ = nullptr;
+      recv_srtp_ = nullptr;
+    }
 
-      RefPtr<TransportFlow> transport_;
-      State state_;
-      RefPtr<SrtpFlow> send_srtp_;
-      RefPtr<SrtpFlow> recv_srtp_;
-      RtpType type_;
+    RefPtr<TransportFlow> transport_;
+    State state_;
+    RefPtr<SrtpFlow> send_srtp_;
+    RefPtr<SrtpFlow> recv_srtp_;
+    RtpType type_;
   };
 
   // The transport is down
-  virtual nsresult TransportFailed_s(TransportInfo &info);
+  virtual nsresult TransportFailed_s(TransportInfo& info);
   // The transport is ready
-  virtual nsresult TransportReady_s(TransportInfo &info);
-  void UpdateRtcpMuxState(TransportInfo &info);
+  virtual nsresult TransportReady_s(TransportInfo& info);
+  void UpdateRtcpMuxState(TransportInfo& info);
 
-  nsresult ConnectTransport_s(TransportInfo &info);
+  nsresult ConnectTransport_s(TransportInfo& info);
 
-  TransportInfo* GetTransportInfo_s(TransportFlow *flow);
+  TransportInfo* GetTransportInfo_s(TransportFlow* flow);
 
   void increment_rtp_packets_sent(int bytes);
   void increment_rtcp_packets_sent();
   void increment_rtp_packets_received(int bytes);
   virtual void OnRtpPacketReceived() {};
   void increment_rtcp_packets_received();
 
-  virtual nsresult SendPacket(TransportFlow *flow, const void *data, int len);
+  virtual nsresult SendPacket(TransportFlow* flow, const void* data, int len);
 
   // Process slots on transports
-  void StateChange(TransportFlow *flow, TransportLayer::State);
-  void RtpPacketReceived(TransportLayer *layer, const unsigned char *data,
+  void StateChange(TransportFlow* flow, TransportLayer::State);
+  void RtpPacketReceived(TransportLayer* layer,
+                         const unsigned char* data,
                          size_t len);
-  void RtcpPacketReceived(TransportLayer *layer, const unsigned char *data,
+  void RtcpPacketReceived(TransportLayer* layer,
+                          const unsigned char* data,
                           size_t len);
-  void PacketReceived(TransportLayer *layer, const unsigned char *data,
+  void PacketReceived(TransportLayer* layer,
+                      const unsigned char* data,
                       size_t len);
 
   Direction direction_;
   size_t level_;
-  RefPtr<MediaSessionConduit> conduit_;  // Our conduit. Written on the main
-                                         // thread. Read on STS thread.
+  RefPtr<MediaSessionConduit> conduit_; // Our conduit. Written on the main
+                                        // thread. Read on STS thread.
 
   // The transport objects. Read/written on STS thread.
   TransportInfo rtp_;
   TransportInfo rtcp_;
 
   // Pointers to the threads we need. Initialized at creation
   // and used all over the place.
   nsCOMPtr<nsIEventTarget> main_thread_;
@@ -289,41 +299,45 @@ class MediaPipeline : public sigslot::ha
   std::string description_;
 
   // Written in c'tor, all following accesses are on the STS thread.
   nsAutoPtr<MediaPipelineFilter> filter_;
   nsAutoPtr<webrtc::RtpHeaderParser> rtp_parser_;
 
   nsAutoPtr<PacketDumper> packet_dumper_;
 
- private:
+private:
   // Gets the current time as a DOMHighResTimeStamp
   static DOMHighResTimeStamp GetNow();
 
-  bool IsRtp(const unsigned char *data, size_t len);
+  bool IsRtp(const unsigned char* data, size_t len);
   // Must be called on the STS thread.  Must be called after DetachMedia().
   void DetachTransport_s();
 };
 
-class ConduitDeleteEvent: public Runnable
+class ConduitDeleteEvent : public Runnable
 {
 public:
-  explicit ConduitDeleteEvent(already_AddRefed<MediaSessionConduit> aConduit) :
-    Runnable("ConduitDeleteEvent"),
-    mConduit(aConduit) {}
+  explicit ConduitDeleteEvent(already_AddRefed<MediaSessionConduit> aConduit)
+    : Runnable("ConduitDeleteEvent")
+    , mConduit(aConduit)
+  {
+  }
 
   /* we exist solely to proxy release of the conduit */
   NS_IMETHOD Run() override { return NS_OK; }
+
 private:
   RefPtr<MediaSessionConduit> mConduit;
 };
 
 // A specialization of pipeline for reading from an input device
 // and transmitting to the network.
-class MediaPipelineTransmit : public MediaPipeline {
+class MediaPipelineTransmit : public MediaPipeline
+{
 public:
   // Set rtcp_transport to nullptr to use rtcp-mux
   MediaPipelineTransmit(const std::string& pc,
                         nsCOMPtr<nsIEventTarget> main_thread,
                         nsCOMPtr<nsIEventTarget> sts_thread,
                         bool is_video,
                         dom::MediaStreamTrack* domtrack,
                         RefPtr<MediaSessionConduit> conduit);
@@ -340,73 +354,74 @@ public:
   virtual void UpdateSinkIdentity_m(dom::MediaStreamTrack* track,
                                     nsIPrincipal* principal,
                                     const PeerIdentity* sinkIdentity);
 
   // Called on the main thread.
   void DetachMedia() override;
 
   // Override MediaPipeline::TransportReady.
-  nsresult TransportReady_s(TransportInfo &info) override;
+  nsresult TransportReady_s(TransportInfo& info) override;
 
   // Replace a track with a different one
   // In non-compliance with the likely final spec, allow the new
   // track to be part of a different stream (since we don't support
   // multiple tracks of a type in a stream yet).  bug 1056650
   virtual nsresult ReplaceTrack(RefPtr<dom::MediaStreamTrack>& domtrack);
 
   // Separate classes to allow ref counting
   class PipelineListener;
   class VideoFrameFeeder;
 
- protected:
+protected:
   ~MediaPipelineTransmit();
 
   void SetDescription();
 
- private:
+private:
   RefPtr<PipelineListener> listener_;
   RefPtr<AudioProxyThread> audio_processing_;
   RefPtr<VideoFrameFeeder> feeder_;
   RefPtr<VideoFrameConverter> converter_;
   bool is_video_;
   RefPtr<dom::MediaStreamTrack> domtrack_;
   bool transmitting_;
 };
 
-
 // A specialization of pipeline for reading from the network and
 // rendering media.
-class MediaPipelineReceive : public MediaPipeline {
- public:
+class MediaPipelineReceive : public MediaPipeline
+{
+public:
   // Set rtcp_transport to nullptr to use rtcp-mux
   MediaPipelineReceive(const std::string& pc,
                        nsCOMPtr<nsIEventTarget> main_thread,
                        nsCOMPtr<nsIEventTarget> sts_thread,
                        RefPtr<MediaSessionConduit> conduit);
 
   int segments_added() const { return segments_added_; }
 
   // Sets the PrincipalHandle we set on the media chunks produced by this
   // pipeline. Must be called on the main thread.
-  virtual void SetPrincipalHandle_m(const PrincipalHandle& principal_handle) = 0;
+  virtual void SetPrincipalHandle_m(
+    const PrincipalHandle& principal_handle) = 0;
 
- protected:
+protected:
   ~MediaPipelineReceive();
 
   int segments_added_;
 
- private:
+private:
 };
 
-
 // A specialization of pipeline for reading from the network and
 // rendering audio.
-class MediaPipelineReceiveAudio : public MediaPipelineReceive {
- public:
+class MediaPipelineReceiveAudio : public MediaPipelineReceive
+{
+public:
   MediaPipelineReceiveAudio(const std::string& pc,
                             nsCOMPtr<nsIEventTarget> main_thread,
                             nsCOMPtr<nsIEventTarget> sts_thread,
                             RefPtr<AudioSessionConduit> conduit,
                             dom::MediaStreamTrack* aTrack);
 
   void DetachMedia() override;
 
@@ -414,28 +429,28 @@ class MediaPipelineReceiveAudio : public
 
   void SetPrincipalHandle_m(const PrincipalHandle& principal_handle) override;
 
   void Start() override;
   void Stop() override;
 
   void OnRtpPacketReceived() override;
 
- private:
+private:
   // Separate class to allow ref counting
   class PipelineListener;
 
   RefPtr<PipelineListener> listener_;
 };
 
-
 // A specialization of pipeline for reading from the network and
 // rendering video.
-class MediaPipelineReceiveVideo : public MediaPipelineReceive {
- public:
+class MediaPipelineReceiveVideo : public MediaPipelineReceive
+{
+public:
   MediaPipelineReceiveVideo(const std::string& pc,
                             nsCOMPtr<nsIEventTarget> main_thread,
                             nsCOMPtr<nsIEventTarget> sts_thread,
                             RefPtr<VideoSessionConduit> conduit,
                             dom::MediaStreamTrack* aTrack);
 
   // Called on the main thread.
   void DetachMedia() override;
@@ -444,22 +459,21 @@ class MediaPipelineReceiveVideo : public
 
   void SetPrincipalHandle_m(const PrincipalHandle& principal_handle) override;
 
   void Start() override;
   void Stop() override;
 
   void OnRtpPacketReceived() override;
 
- private:
+private:
   class PipelineRenderer;
   friend class PipelineRenderer;
 
   // Separate class to allow ref counting
   class PipelineListener;
 
   RefPtr<PipelineRenderer> renderer_;
   RefPtr<PipelineListener> listener_;
 };
 
-
-}  // namespace mozilla
+} // namespace mozilla
 #endif