Bug 1404977 - Part 7 - Make each MediaEngineWebRTCMicrophoneSource independent. r?pehrsons draft
authorPaul Adenot <paul@paul.cx>
Mon, 30 Apr 2018 15:37:18 +0200
changeset 794147 79ebf1b2bd24b8da8da3bd2fbee5dc196637b18a
parent 794146 4914fb95154bcf53dba8aca4b0862649724fcaf2
child 794148 005538dcf876aad4c2d4c84a86e02d50195da41f
push id109576
push userachronop@gmail.com
push dateFri, 11 May 2018 11:11:31 +0000
reviewerspehrsons
bugs1404977
milestone62.0a1
Bug 1404977 - Part 7 - Make each MediaEngineWebRTCMicrophoneSource independent. r?pehrsons The concept of an "allocation" is not of any use anymore, now, but this is big enough so that I want to clean it up later. This allows having multiple streams for the same gUM call on the same device in the same document, and being able to apply different constraints to each of them. Also removes the last bit of global static, so that it is also possible to have multiple mics open in the same content process. MozReview-Commit-ID: CC2GVHwSuc1
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -194,20 +194,21 @@ private:
   Mutex mMutex;
   RefPtr<MediaEngineWebRTCMicrophoneSource> mAudioSource;
 };
 
 class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource,
                                           public AudioDataListenerInterface
 {
 public:
-  MediaEngineWebRTCMicrophoneSource(mozilla::AudioInput* aAudioInput,
-                                    int aIndex,
-                                    const char* name,
-                                    const char* uuid,
+  MediaEngineWebRTCMicrophoneSource(mozilla::CubebDeviceEnumerator* aEnumerator,
+                                    CubebUtils::AudioDeviceID aID,
+                                    const nsString& name,
+                                    const nsCString& uuid,
+                                    uint32_t maxChannelCount,
                                     bool aDelayAgnostic,
                                     bool aExtendedFilter);
 
   bool RequiresSharing() const override
   {
     return true;
   }
 
@@ -250,16 +251,21 @@ public:
                         AudioDataValue* aBuffer, size_t aFrames,
                         TrackRate aRate, uint32_t aChannels) override;
   void NotifyInputData(MediaStreamGraph* aGraph,
                        const AudioDataValue* aBuffer, size_t aFrames,
                        TrackRate aRate, uint32_t aChannels) override;
 
   void DeviceChanged() override;
 
+  uint32_t InputChannelCount() override
+  {
+    return GetUserInputChannelCount();
+  }
+
   dom::MediaSourceEnum GetMediaSource() const override
   {
     return dom::MediaSourceEnum::Microphone;
   }
 
   nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override
   {
     return NS_ERROR_NOT_IMPLEMENTED;
@@ -367,24 +373,25 @@ private:
 
   // This is true when all processing is disabled, we can skip
   // packetization, resampling and other processing passes.
   // Graph thread only.
   bool PassThrough() const;
 
   // Graph thread only.
   void SetPassThrough(bool aPassThrough);
+  uint32_t GetUserInputChannelCount();
+  void SetUserInputChannelCount(uint32_t aUserInputChannelCount);
 
   // Owning thread only.
   RefPtr<WebRTCAudioDataListener> mListener;
+  RefPtr<mozilla::CubebDeviceEnumerator> mEnumerator;
+  // Number of times this device has been opened for this MSG.
+  int mChannelsOpen;
 
-  // Note: shared across all microphone sources. Owning thread only.
-  static int sChannelsOpen;
-
-  const RefPtr<mozilla::AudioInput> mAudioInput;
   const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
 
   // accessed from the GraphDriver thread except for deletion.
   nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
   nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizerOutput;
 
   // mMutex protects some of our members off the owning thread.
   Mutex mMutex;
@@ -393,28 +400,32 @@ private:
   // Both the array and the Allocation members are modified under mMutex on
   // the owning thread. Accessed under one of the two.
   nsTArray<Allocation> mAllocations;
 
   // Current state of the shared resource for this source.
   // Set under mMutex on the owning thread. Accessed under one of the two
   MediaEngineSourceState mState = kReleased;
 
-  int mCapIndex;
+  CubebUtils::AudioDeviceID mDeviceID;
   bool mDelayAgnostic;
   bool mExtendedFilter;
   bool mStarted;
 
   const nsString mDeviceName;
   const nsCString mDeviceUUID;
 
   // The current settings for the underlying device.
   // Member access is main thread only after construction.
   const nsMainThreadPtrHandle<media::Refcountable<dom::MediaTrackSettings>> mSettings;
 
+  // The number of channels asked for by content, after clamping to the range of
+  // legal channel count for this particular device. This is the number of
+  // channels of the input buffer received.
+  uint32_t mUserInputChannelCount;
   uint64_t mTotalFrames;
   uint64_t mLastLogFrames;
 
   // mSkipProcessing is true if none of the processing passes are enabled,
   // because of prefs or constraints. This allows simply copying the audio into
   // the MSG, skipping resampling and the whole webrtc.org code.
   // This is read and written to only on the MSG thread.
   bool mSkipProcessing;
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -101,54 +101,56 @@ WebRTCAudioDataListener::Shutdown()
 {
   MutexAutoLock lock(mMutex);
   mAudioSource = nullptr;
 }
 
 /**
  * WebRTC Microphone MediaEngineSource.
  */
-int MediaEngineWebRTCMicrophoneSource::sChannelsOpen = 0;
 
 MediaEngineWebRTCMicrophoneSource::Allocation::Allocation(
     const RefPtr<AllocationHandle>& aHandle)
   : mHandle(aHandle)
 {}
 
 MediaEngineWebRTCMicrophoneSource::Allocation::~Allocation() = default;
 
 MediaEngineWebRTCMicrophoneSource::MediaEngineWebRTCMicrophoneSource(
-    mozilla::AudioInput* aAudioInput,
-    int aIndex,
-    const char* aDeviceName,
-    const char* aDeviceUUID,
+    mozilla::CubebDeviceEnumerator* aEnumerator,
+    CubebUtils::AudioDeviceID aID,
+    const nsString& aDeviceName,
+    const nsCString& aDeviceUUID,
+    uint32_t maxChannelCount,
     bool aDelayAgnostic,
     bool aExtendedFilter)
-  : mAudioInput(aAudioInput)
+  : mEnumerator(aEnumerator)
+  , mChannelsOpen(0)
   , mAudioProcessing(AudioProcessing::Create())
   , mMutex("WebRTCMic::Mutex")
-  , mCapIndex(aIndex)
+  , mDeviceID(aID)
   , mDelayAgnostic(aDelayAgnostic)
   , mExtendedFilter(aExtendedFilter)
   , mStarted(false)
-  , mDeviceName(NS_ConvertUTF8toUTF16(aDeviceName))
+  , mDeviceName(aDeviceName)
   , mDeviceUUID(aDeviceUUID)
   , mSettings(
       new nsMainThreadPtrHolder<media::Refcountable<dom::MediaTrackSettings>>(
         "MediaEngineWebRTCMicrophoneSource::mSettings",
         new media::Refcountable<dom::MediaTrackSettings>(),
         // Non-strict means it won't assert main thread for us.
         // It would be great if it did but we're already on the media thread.
         /* aStrict = */ false))
+  , mUserInputChannelCount(maxChannelCount)
   , mTotalFrames(0)
   , mLastLogFrames(0)
   , mSkipProcessing(false)
   , mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100)
 {
-  MOZ_ASSERT(aAudioInput);
+  MOZ_ASSERT(aEnumerator);
   mSettings->mEchoCancellation.Construct(0);
   mSettings->mAutoGainControl.Construct(0);
   mSettings->mNoiseSuppression.Construct(0);
   mSettings->mChannelCount.Construct(0);
   // We'll init lazily as needed
 }
 
 nsString
@@ -421,125 +423,139 @@ MediaEngineWebRTCMicrophoneSource::Updat
   AssertIsOnOwningThread();
 
   FlattenedConstraints c(aNetConstraints);
 
   MediaEnginePrefs prefs = aPrefs;
   prefs.mAecOn = c.mEchoCancellation.Get(prefs.mAecOn);
   prefs.mAgcOn = c.mAutoGainControl.Get(prefs.mAgcOn);
   prefs.mNoiseOn = c.mNoiseSuppression.Get(prefs.mNoiseOn);
-  uint32_t maxChannels = 1;
-  if (mAudioInput->GetMaxAvailableChannels(maxChannels) != 0) {
+
+  RefPtr<AudioDeviceInfo> info = mEnumerator->DeviceInfoFromID(mDeviceID);
+
+  if (!info) {
     return NS_ERROR_FAILURE;
   }
-  // Check channelCount violation
-  if (static_cast<int32_t>(maxChannels) < c.mChannelCount.mMin ||
-      static_cast<int32_t>(maxChannels) > c.mChannelCount.mMax) {
+
+  // Determine an actual channel count to use for this source. Three factors at
+  // play here: the device capabilities, the constraints passed in by content,
+  // and a pref that can force things (for testing)
+
+  // First, check channelCount violation wrt constraints. On violation, flag
+  // the bad constraint and fail.
+  if (static_cast<int32_t>(info->MaxChannels()) < c.mChannelCount.mMin ||
+      static_cast<int32_t>(info->MaxChannels()) > c.mChannelCount.mMax) {
     *aOutBadConstraint = "channelCount";
     return NS_ERROR_FAILURE;
   }
-  // Clamp channelCount to a valid value
+  // A pref can force the channel count to use. If the pref has a value of zero
+  // or lower, it has no effect.
   if (prefs.mChannels <= 0) {
-    prefs.mChannels = static_cast<int32_t>(maxChannels);
+    prefs.mChannels = static_cast<int32_t>(info->MaxChannels());
   }
+
+  // Get the number of channels asked for by content, and clamp it between the
+  // pref and the maximum number of channels that the device supports.
   prefs.mChannels = c.mChannelCount.Get(std::min(prefs.mChannels,
-                                        static_cast<int32_t>(maxChannels)));
-  // Clamp channelCount to a valid value
-  prefs.mChannels = std::max(1, std::min(prefs.mChannels, static_cast<int32_t>(maxChannels)));
+                                        static_cast<int32_t>(info->MaxChannels())));
 
   LOG(("Audio config: aec: %d, agc: %d, noise: %d, channels: %d",
       prefs.mAecOn ? prefs.mAec : -1,
       prefs.mAgcOn ? prefs.mAgc : -1,
       prefs.mNoiseOn ? prefs.mNoise : -1,
       prefs.mChannels));
 
   switch (mState) {
     case kReleased:
       MOZ_ASSERT(aHandle);
-      if (sChannelsOpen != 0) {
-        // Until we fix (or wallpaper) support for multiple mic input
-        // (Bug 1238038) fail allocation for a second device
-        return NS_ERROR_FAILURE;
-      }
-      if (mAudioInput->SetRecordingDevice(mCapIndex)) {
-         return NS_ERROR_FAILURE;
-      }
-      mAudioInput->SetUserChannelCount(prefs.mChannels);
       {
         MutexAutoLock lock(mMutex);
         mState = kAllocated;
+        mChannelsOpen++;
       }
-      sChannelsOpen++;
-      LOG(("Audio device %d allocated", mCapIndex));
-      {
-        // Update with the actual applied channelCount in order
-        // to store it in settings.
-        uint32_t channelCount = 0;
-        mAudioInput->GetChannelCount(channelCount);
-        MOZ_ASSERT(channelCount > 0);
-        prefs.mChannels = channelCount;
-      }
+      LOG(("Audio device %s allocated", NS_ConvertUTF16toUTF8(info->FriendlyName()).get()));
       break;
 
     case kStarted:
     case kStopped:
-      if (prefs.mChannels != mNetPrefs.mChannels) {
-        // If the channel count changed, tell the MSG to open a new driver with
-        // the correct channel count.
-        MOZ_ASSERT(!mAllocations.IsEmpty());
-        RefPtr<SourceMediaStream> stream;
-        for (const Allocation& allocation : mAllocations) {
-          if (allocation.mStream && allocation.mStream->GraphImpl()) {
-            stream = allocation.mStream;
-            break;
-          }
-        }
-        MOZ_ASSERT(stream);
+      if (prefs == mNetPrefs) {
+        return NS_OK;
+      }
 
-        mAudioInput->SetUserChannelCount(prefs.mChannels);
-        // Get validated number of channel
-        uint32_t channelCount = 0;
-        mAudioInput->GetChannelCount(channelCount);
-        MOZ_ASSERT(channelCount > 0 && mNetPrefs.mChannels > 0);
-        if (!stream->OpenNewAudioCallbackDriver(mListener)) {
-          MOZ_LOG(GetMediaManagerLog(), LogLevel::Error, ("Could not open a new AudioCallbackDriver for input"));
-          return NS_ERROR_FAILURE;
-        }
+      if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
+        LOG(("Audio device %s reallocated", NS_ConvertUTF16toUTF8(info->FriendlyName()).get()));
+      } else {
+        LOG(("Audio device %s allocated shared", NS_ConvertUTF16toUTF8(info->FriendlyName()).get()));
       }
       break;
 
     default:
-      LOG(("Audio device %d in ignored state %d", mCapIndex, mState));
+      LOG(("Audio device %s in ignored state %d", NS_ConvertUTF16toUTF8(info->FriendlyName()).get(), mState));
       break;
   }
 
-  if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
-    if (mAllocations.IsEmpty()) {
-      LOG(("Audio device %d reallocated", mCapIndex));
-    } else {
-      LOG(("Audio device %d allocated shared", mCapIndex));
-    }
-  }
-
-  if (sChannelsOpen > 0) {
+  if (mChannelsOpen > 0) {
     UpdateAGCSettingsIfNeeded(prefs.mAgcOn, static_cast<AgcModes>(prefs.mAgc));
     UpdateNSSettingsIfNeeded(prefs.mNoiseOn, static_cast<NsModes>(prefs.mNoise));
     UpdateAECSettingsIfNeeded(prefs.mAecOn, static_cast<EcModes>(prefs.mAec));
 
     webrtc::Config config;
     config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(mExtendedFilter));
     config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(mDelayAgnostic));
     mAudioProcessing->SetExtraOptions(config);
   }
   mNetPrefs = prefs;
   return NS_OK;
 }
 
 #undef HANDLE_APM_ERROR
 
+bool
+MediaEngineWebRTCMicrophoneSource::PassThrough() const
+{
+  MOZ_ASSERT(!mAllocations.IsEmpty() &&
+             mAllocations[0].mStream &&
+             mAllocations[0].mStream->GraphImpl()->CurrentDriver()->OnThread());
+  return mSkipProcessing;
+}
+void
+MediaEngineWebRTCMicrophoneSource::SetPassThrough(bool aPassThrough)
+{
+  if (mAllocations.IsEmpty()) {
+    return;
+  }
+  MOZ_ASSERT(!mAllocations.IsEmpty() &&
+             mAllocations[0].mStream &&
+             mAllocations[0].mStream->GraphImpl()->CurrentDriver()->OnThread());
+  mSkipProcessing = aPassThrough;
+}
+
+uint32_t
+MediaEngineWebRTCMicrophoneSource::GetUserInputChannelCount()
+{
+  MOZ_ASSERT(!mAllocations.IsEmpty() &&
+             mAllocations[0].mStream &&
+             mAllocations[0].mStream->GraphImpl()->CurrentDriver()->OnThread());
+  return mUserInputChannelCount;
+}
+
+void
+MediaEngineWebRTCMicrophoneSource::SetUserInputChannelCount(
+  uint32_t aUserInputChannelCount)
+{
+  if (mAllocations.IsEmpty()) {
+    return;
+  }
+  MOZ_ASSERT(!mAllocations.IsEmpty() &&
+             mAllocations[0].mStream &&
+             mAllocations[0].mStream->GraphImpl()->CurrentDriver()->OnThread());
+  mUserInputChannelCount = aUserInputChannelCount;
+  mAllocations[0].mStream->GraphImpl()->ReevaluateInputDevice();
+}
+
 void
 MediaEngineWebRTCMicrophoneSource::ApplySettings(const MediaEnginePrefs& aPrefs,
                                                  RefPtr<MediaStreamGraphImpl> aGraph)
 {
   AssertIsOnOwningThread();
   MOZ_DIAGNOSTIC_ASSERT(aGraph);
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
@@ -547,35 +563,41 @@ MediaEngineWebRTCMicrophoneSource::Apply
     that->mSettings->mEchoCancellation.Value() = aPrefs.mAecOn;
     that->mSettings->mAutoGainControl.Value() = aPrefs.mAgcOn;
     that->mSettings->mNoiseSuppression.Value() = aPrefs.mNoiseOn;
     that->mSettings->mChannelCount.Value() = aPrefs.mChannels;
 
     class Message : public ControlMessage {
     public:
       Message(MediaEngineWebRTCMicrophoneSource* aSource,
-              bool aPassThrough)
+              bool aPassThrough,
+              uint32_t aUserInputChannelCount)
         : ControlMessage(nullptr)
         , mMicrophoneSource(aSource)
         , mPassThrough(aPassThrough)
+        , mUserInputChannelCount(aUserInputChannelCount)
         {}
 
       void Run() override
       {
         mMicrophoneSource->SetPassThrough(mPassThrough);
+        mMicrophoneSource->SetUserInputChannelCount(mUserInputChannelCount);
       }
 
     protected:
       RefPtr<MediaEngineWebRTCMicrophoneSource> mMicrophoneSource;
       bool mPassThrough;
+      uint32_t mUserInputChannelCount;
     };
 
     bool passThrough = !(aPrefs.mAecOn || aPrefs.mAgcOn || aPrefs.mNoiseOn);
     if (graph) {
-      graph->AppendMessage(MakeUnique<Message>(that, passThrough));
+      graph->AppendMessage(MakeUnique<Message>(that,
+                                               passThrough,
+                                               aPrefs.mChannels));
     }
 
     return NS_OK;
   }));
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
@@ -612,59 +634,51 @@ MediaEngineWebRTCMicrophoneSource::Deall
 {
   AssertIsOnOwningThread();
 
   size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
   MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex);
   MOZ_DIAGNOSTIC_ASSERT(!mAllocations[i].mEnabled,
                         "Source should be stopped for the track before removing");
 
-  LOG(("Mic source %p allocation %p Deallocate()", this, aHandle.get()));
-
   if (mAllocations[i].mStream && IsTrackIDExplicit(mAllocations[i].mTrackID)) {
     mAllocations[i].mStream->EndTrack(mAllocations[i].mTrackID);
   }
 
   {
     MutexAutoLock lock(mMutex);
     mAllocations.RemoveElementAt(i);
   }
 
   if (mAllocations.IsEmpty()) {
     // If empty, no callbacks to deliver data should be occuring
     MOZ_ASSERT(mState != kReleased, "Source not allocated");
     MOZ_ASSERT(mState != kStarted, "Source not stopped");
-    MOZ_ASSERT(sChannelsOpen > 0);
-    --sChannelsOpen;
+    MOZ_ASSERT(mChannelsOpen > 0);
+    --mChannelsOpen;
 
     MutexAutoLock lock(mMutex);
     mState = kReleased;
-    LOG(("Audio device %d deallocated", mCapIndex));
+    LOG(("Audio device %s deallocated", NS_ConvertUTF16toUTF8(mDeviceName).get()));
   } else {
-    LOG(("Audio device %d deallocated but still in use", mCapIndex));
+    LOG(("Audio device %s deallocated but still in use", NS_ConvertUTF16toUTF8(mDeviceName).get()));
   }
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::SetTrack(const RefPtr<const AllocationHandle>& aHandle,
                                             const RefPtr<SourceMediaStream>& aStream,
                                             TrackID aTrackID,
                                             const PrincipalHandle& aPrincipal)
 {
   AssertIsOnOwningThread();
   MOZ_ASSERT(aStream);
   MOZ_ASSERT(IsTrackIDExplicit(aTrackID));
 
-  LOG(("Mic source %p allocation %p SetTrack() stream=%p, track=%" PRId32,
-       this, aHandle.get(), aStream.get(), aTrackID));
-
-  // Until we fix bug 1400488 we need to block a second tab (OuterWindow)
-  // from opening an already-open device.  If it's the same tab, they
-  // will share a Graph(), and we can allow it.
   if (!mAllocations.IsEmpty() &&
       mAllocations[0].mStream &&
       mAllocations[0].mStream->Graph() != aStream->Graph()) {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
   size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
   MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex);
@@ -693,27 +707,32 @@ MediaEngineWebRTCMicrophoneSource::SetTr
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Start(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
 
-  if (sChannelsOpen == 0) {
+  if (mChannelsOpen == 0) {
     return NS_ERROR_FAILURE;
   }
 
-  LOG(("Mic source %p allocation %p Start()", this, aHandle.get()));
-
   size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
   MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex,
                         "Can't start track that hasn't been added");
   Allocation& allocation = mAllocations[i];
 
+  // For now, we only allow opening a single audio input device per page,
+  // because we can only have one MSG per page.
+  if (allocation.mStream->GraphImpl()->InputDeviceID() &&
+      allocation.mStream->GraphImpl()->InputDeviceID() != mDeviceID) {
+    return NS_ERROR_FAILURE;
+  }
+
   MOZ_ASSERT(!allocation.mEnabled, "Source already started");
   {
     // This spans setting both the enabled state and mState.
     MutexAutoLock lock(mMutex);
     allocation.mEnabled = true;
 
 #ifdef DEBUG
     // Ensure that callback-tracking state is reset when callbacks start coming.
@@ -725,17 +744,17 @@ MediaEngineWebRTCMicrophoneSource::Start
     if (!mListener) {
       mListener = new WebRTCAudioDataListener(this);
     }
 
     // Make sure logger starts before capture
     AsyncLatencyLogger::Get(true);
 
     // Must be *before* StartSend() so it will notice we selected external input (full_duplex)
-    mAudioInput->StartRecording(allocation.mStream, mListener);
+    allocation.mStream->OpenAudioInput(mDeviceID, mListener);
 
     MOZ_ASSERT(mState != kReleased);
     mState = kStarted;
   }
 
   ApplySettings(mNetPrefs, allocation.mStream->GraphImpl());
 
   return NS_OK;
@@ -758,17 +777,17 @@ MediaEngineWebRTCMicrophoneSource::Stop(
     return NS_OK;
   }
 
   {
     // This spans setting both the enabled state and mState.
     MutexAutoLock lock(mMutex);
     allocation.mEnabled = false;
 
-    mAudioInput->StopRecording(allocation.mStream);
+    allocation.mStream->CloseAudioInput(mDeviceID, mListener);
 
     if (HasEnabledTrack()) {
       // Another track is keeping us from stopping
       return NS_OK;
     }
 
     MOZ_ASSERT(mState == kStarted, "Should be started when stopping");
     mState = kStopped;
@@ -793,16 +812,17 @@ MediaEngineWebRTCMicrophoneSource::GetSe
 void
 MediaEngineWebRTCMicrophoneSource::Pull(const RefPtr<const AllocationHandle>& aHandle,
                                         const RefPtr<SourceMediaStream>& aStream,
                                         TrackID aTrackID,
                                         StreamTime aDesiredTime,
                                         const PrincipalHandle& aPrincipalHandle)
 {
   StreamTime delta;
+  LOG_FRAMES(("NotifyPull, desired = %" PRId64, (int64_t) aDesiredTime));
 
   {
     MutexAutoLock lock(mMutex);
     size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
     if (i == mAllocations.NoIndex) {
       // This handle must have been deallocated. That's fine, and its track
       // will already be ended. No need to do anything.
       return;
@@ -828,17 +848,18 @@ MediaEngineWebRTCMicrophoneSource::Pull(
       return;
     }
 
     LOG_FRAMES(("Pulling %" PRId64 " frames of silence for allocation %p",
                 delta, mAllocations[i].mHandle.get()));
 
     // This assertion fails when we append silence here in the same iteration
     // as there were real audio samples already appended by the audio callback.
-    // Note that this is exempted until live samples and a subsequent chunk of silence have been appended to the track. This will cover cases like:
+    // Note that this is exempted until live samples and a subsequent chunk of
+    // silence have been appended to the track. This will cover cases like:
     // - After Start(), there is silence (maybe multiple times) appended before
     //   the first audio callback.
     // - After Start(), there is real data (maybe multiple times) appended
     //   before the first graph iteration.
     // And other combinations of order of audio sample sources.
     MOZ_ASSERT_IF(
       mAllocations[i].mEnabled &&
       mAllocations[i].mLiveFramesAppended &&
@@ -1073,28 +1094,16 @@ MediaEngineWebRTCMicrophoneSource::Packe
                            processedOutputChannelPointersConst,
                            mPacketizerInput->PacketSize(),
                            allocation.mPrincipal);
       allocation.mStream->AppendToTrack(allocation.mTrackID, &segment);
     }
   }
 }
 
-bool
-MediaEngineWebRTCMicrophoneSource::PassThrough() const
-{
-  return mSkipProcessing;
-}
-
-void
-MediaEngineWebRTCMicrophoneSource::SetPassThrough(bool aPassThrough)
-{
-  mSkipProcessing = aPassThrough;
-}
-
 template<typename T>
 void
 MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
                                                  size_t aFrames,
                                                  uint32_t aChannels)
 {
   MutexAutoLock lock(mMutex);