Bug 1397793 - Use a ControlMessage to switch between passthrough and processing mode for microphone input. r?pehsrons draft
author Paul Adenot <paul@paul.cx>
Wed, 08 Nov 2017 14:10:45 +0100
changeset 707568 cf84d44df1fadd262283946641d7ce67f779b7d2
parent 707567 f80414c0e9048ee5eb9754a0161844b5607addb8
child 707569 a9724f42fbf33a08a08b720f42d25bda0b6623f1
push id 92158
push user paul@paul.cx
push date Tue, 05 Dec 2017 14:38:23 +0000
reviewers pehsrons
bugs 1397793
milestone 59.0a1
Bug 1397793 - Use a ControlMessage to switch between passthrough and processing mode for microphone input. r?pehsrons MozReview-Commit-ID: EIvQKZf7oGq
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/moz.build
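
Overview: the patch stops toggling the microphone source's passthrough flag directly across threads and instead dispatches a ControlMessage to the MediaStreamGraph (MSG) thread. SetLastPrefs() appends a message whose Run() calls SetPassThrough() on the graph thread, so mSkipProcessing is only read and written there and no longer needs to be atomic. The sketch below is a standalone model of that pattern, not the Gecko API: Message, GraphThread, MicrophoneSource and SetPassThroughMessage are illustrative stand-ins; the real ControlMessage and AppendMessage machinery lives in MediaStreamGraph.

// Standalone model of the pattern in this patch (assumed names, not Gecko's):
// state changes are posted as messages to the thread that owns the state, so
// the flag itself can be a plain bool instead of an atomic.
#include <condition_variable>
#include <deque>
#include <memory>
#include <mutex>
#include <thread>

class Message {                         // stand-in for MSG ControlMessage
public:
  virtual ~Message() = default;
  virtual void Run() = 0;               // runs on the graph thread only
};

class GraphThread {                     // stand-in for the MSG thread
public:
  GraphThread() : mThread([this] { Loop(); }) {}
  ~GraphThread() { AppendMessage(nullptr); mThread.join(); }
  void AppendMessage(std::unique_ptr<Message> aMessage) {
    std::lock_guard<std::mutex> lock(mMutex);
    mQueue.push_back(std::move(aMessage));
    mCondVar.notify_one();
  }
private:
  void Loop() {
    for (;;) {
      std::unique_ptr<Message> msg;
      {
        std::unique_lock<std::mutex> lock(mMutex);
        mCondVar.wait(lock, [this] { return !mQueue.empty(); });
        msg = std::move(mQueue.front());
        mQueue.pop_front();
      }
      if (!msg) { return; }             // nullptr acts as a shutdown sentinel
      msg->Run();
    }
  }
  std::mutex mMutex;
  std::condition_variable mCondVar;
  std::deque<std::unique_ptr<Message>> mQueue;
  std::thread mThread;
};

class MicrophoneSource {                // stand-in for the microphone source
public:
  // Both accessors are only called on the graph thread, so no atomics needed.
  bool PassThrough() const { return mSkipProcessing; }
  void SetPassThrough(bool aPassThrough) { mSkipProcessing = aPassThrough; }
private:
  bool mSkipProcessing = false;
};

class SetPassThroughMessage final : public Message {
public:
  SetPassThroughMessage(MicrophoneSource* aSource, bool aPassThrough)
    : mSource(aSource), mPassThrough(aPassThrough) {}
  void Run() override { mSource->SetPassThrough(mPassThrough); }
private:
  MicrophoneSource* mSource;
  bool mPassThrough;
};

int main() {
  MicrophoneSource source;
  GraphThread graph;
  // Main thread decides the mode, e.g. passThrough = !(aec || agc || noise).
  graph.AppendMessage(std::make_unique<SetPassThroughMessage>(&source, true));
}
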
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -489,33 +489,37 @@ private:
   void UpdateAGCSettingsIfNeeded(bool aEnable, webrtc::AgcModes aMode);
   void UpdateNSSettingsIfNeeded(bool aEnable, webrtc::NsModes aMode);
 
   void SetLastPrefs(const MediaEnginePrefs& aPrefs);
 
   // These allocate/configure and release the channel
   bool AllocChannel();
   void FreeChannel();
-
-  // This is true when all processing is disabled, we can skip
-  // packetization, resampling and other processing passes.
-  bool PassThrough() {
-    return mSkipProcessing;
-  }
   template<typename T>
   void InsertInGraph(const T* aBuffer,
                      size_t aFrames,
                      uint32_t aChannels);
 
   void PacketizeAndProcess(MediaStreamGraph* aGraph,
                            const AudioDataValue* aBuffer,
                            size_t aFrames,
                            TrackRate aRate,
                            uint32_t aChannels);
 
+
+  // This is true when all processing is disabled, we can skip
+  // packetization, resampling and other processing passes.
+  bool PassThrough() {
+    return mSkipProcessing;
+  }
+  void SetPassThrough(bool aPassThrough) {
+    mSkipProcessing = aPassThrough;
+  }
+
   RefPtr<mozilla::AudioInput> mAudioInput;
   RefPtr<WebRTCAudioDataListener> mListener;
 
   // Note: shared across all microphone sources
   static int sChannelsOpen;
 
   const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
   const RefPtr<AudioOutputObserver> mAudioOutputObserver;
@@ -543,17 +547,18 @@ private:
 
   int32_t mSampleFrequency;
   uint64_t mTotalFrames;
   uint64_t mLastLogFrames;
 
   // mSkipProcessing is true if none of the processing passes are enabled,
   // because of prefs or constraints. This allows simply copying the audio into
   // the MSG, skipping resampling and the whole webrtc.org code.
-  std::atomic_bool mSkipProcessing;
+  // This is read and written to only on the MSG thread.
+  bool mSkipProcessing;
 
   // To only update microphone when needed, we keep track of previous settings.
   MediaEnginePrefs mLastPrefs;
 
   AlignedFloatBuffer mInputBuffer;
   AlignedFloatBuffer mDeinterleavedBuffer;
   AlignedAudioBuffer mInputDownmixBuffer;
 };
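
Note on the header change: with the setter routed through the graph thread, PassThrough() and SetPassThrough() are both meant to be called from the MSG thread only, which is what lets mSkipProcessing drop std::atomic_bool for a plain bool. On the read side the flag selects between the two input paths; the consumer in MediaEngineWebRTCAudio.cpp looks roughly like the following (paraphrased from the existing input callback, not part of this patch):

  // Audio callback on the MSG thread: copy straight into the graph when all
  // processing is disabled, otherwise go through the webrtc.org pipeline.
  if (PassThrough()) {
    InsertInGraph<AudioDataValue>(aBuffer, aFrames, aChannels);
  } else {
    PacketizeAndProcess(aGraph, aBuffer, aFrames, aRate, aChannels);
  }
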
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -5,16 +5,17 @@
 #include "MediaEngineWebRTC.h"
 #include <stdio.h>
 #include <algorithm>
 #include "mozilla/Assertions.h"
 #include "MediaTrackConstraints.h"
 #include "mtransport/runnable_utils.h"
 #include "nsAutoPtr.h"
 #include "AudioConverter.h"
+#include "MediaStreamGraphImpl.h"
 
 // scoped_ptr.h uses FF
 #ifdef FF
 #undef FF
 #endif
 #include "webrtc/modules/audio_device/opensl/single_rw_fifo.h"
 #include "webrtc/voice_engine/voice_engine_defines.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
@@ -505,33 +506,56 @@ MediaEngineWebRTCMicrophoneSource::Updat
   }
   SetLastPrefs(prefs);
   return NS_OK;
 }
 
 #undef HANDLE_APM_ERROR
 
 void
-MediaEngineWebRTCMicrophoneSource::SetLastPrefs(
-    const MediaEnginePrefs& aPrefs)
+MediaEngineWebRTCMicrophoneSource::SetLastPrefs(const MediaEnginePrefs& aPrefs)
 {
   mLastPrefs = aPrefs;
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
 
   NS_DispatchToMainThread(media::NewRunnableFrom([that, aPrefs]() mutable {
     that->mSettings->mEchoCancellation.Value() = aPrefs.mAecOn;
     that->mSettings->mAutoGainControl.Value() = aPrefs.mAgcOn;
     that->mSettings->mNoiseSuppression.Value() = aPrefs.mNoiseOn;
     that->mSettings->mChannelCount.Value() = aPrefs.mChannels;
+
+    class Message : public ControlMessage {
+    public:
+      Message(MediaEngineWebRTCMicrophoneSource* aSource,
+              bool aPassThrough)
+        : ControlMessage(nullptr)
+        , mMicrophoneSource(aSource)
+        , mPassThrough(aPassThrough)
+        {}
+
+      void Run() override
+      {
+        mMicrophoneSource->SetPassThrough(mPassThrough);
+      }
+
+    protected:
+      RefPtr<MediaEngineWebRTCMicrophoneSource> mMicrophoneSource;
+      bool mPassThrough;
+    };
+
+    bool passThrough = !(aPrefs.mAecOn || aPrefs.mAgcOn || aPrefs.mNoiseOn);
+    if (!that->mSources.IsEmpty()) {
+      that->mSources[0]->GraphImpl()->AppendMessage(MakeUnique<Message>(that, passThrough));
+    }
+
     return NS_OK;
   }));
 }
 
-
 nsresult
 MediaEngineWebRTCMicrophoneSource::Deallocate(AllocationHandle* aHandle)
 {
   AssertIsOnOwningThread();
 
   Super::Deallocate(aHandle);
 
   if (!mRegisteredHandles.Length()) {
@@ -658,17 +682,17 @@ void
 MediaEngineWebRTCMicrophoneSource::NotifyOutputData(MediaStreamGraph* aGraph,
                                                     AudioDataValue* aBuffer,
                                                     size_t aFrames,
                                                     TrackRate aRate,
                                                     uint32_t aChannels)
 {
   if (!PassThrough()) {
     mAudioOutputObserver->InsertFarEnd(aBuffer, aFrames, false,
-                                  aRate, aChannels);
+                                       aRate, aChannels);
   }
 }
 
 // Only called if we're not in passthrough mode
 void
 MediaEngineWebRTCMicrophoneSource::PacketizeAndProcess(MediaStreamGraph* aGraph,
                                                        const AudioDataValue* aBuffer,
                                                        size_t aFrames,
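
Flow of the new code in SetLastPrefs(): the prefs are applied on the owning thread, then a main-thread runnable updates mSettings, computes passThrough = !(aPrefs.mAecOn || aPrefs.mAgcOn || aPrefs.mNoiseOn), and, if at least one source stream exists, appends the Message (constructed with a null stream and a RefPtr to the source) to that stream's graph. The graph later executes Run() on the MSG thread, where SetPassThrough() flips the flag, so the write happens on the same thread as the reads in the audio callbacks. NotifyOutputData() above only feeds the far-end observer when processing is enabled, so the far-end path is skipped entirely in passthrough mode.
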
--- a/dom/media/webrtc/moz.build
+++ b/dom/media/webrtc/moz.build
@@ -34,21 +34,22 @@ if CONFIG['MOZ_WEBRTC']:
         'RTCCertificate.cpp',
         'RTCIdentityProviderRegistrar.cpp',
     ]
     # MediaEngineWebRTC.cpp needs to be built separately.
     SOURCES += [
         'MediaEngineWebRTC.cpp',
     ]
     LOCAL_INCLUDES += [
+        '..',
         '/dom/base',
         '/media/libyuv/libyuv/include',
         '/media/webrtc/signaling/src/common',
         '/media/webrtc/signaling/src/common/browser_logging',
-        '/media/webrtc/trunk',
+        '/media/webrtc/trunk'
     ]
 
 XPIDL_SOURCES += [
     'nsITabSource.idl'
 ]
 
 UNIFIED_SOURCES += [
     'MediaEngineDefault.cpp',
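
The new '..' entry in LOCAL_INCLUDES points at dom/media itself, presumably so that the #include "MediaStreamGraphImpl.h" added to MediaEngineWebRTCAudio.cpp resolves: MediaStreamGraphImpl.h lives in dom/media, one level above dom/media/webrtc.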