Bug 1213414 - Implement channelCount audio constraint. r?padenot,jib draft
authorAlex Chronopoulos <achronop@gmail.com>
Thu, 29 Jun 2017 21:01:17 -0700
changeset 602509 a1b3a9eeb0e0f22fb28ea4a1f98d45c6ca6faf96
parent 602508 36170aa1649689808dfe99d142460c49a0823fc7
child 635619 1ddd10db2bd13a74ceb8b96324a27018a93a4e08
push id66454
push userachronop@gmail.com
push dateFri, 30 Jun 2017 04:01:16 +0000
reviewerspadenot, jib
bugs1213414
milestone56.0a1
Bug 1213414 - Implement channelCount audio constraint. r?padenot,jib MozReview-Commit-ID: K95iBYOE1nR
dom/media/GraphDriver.cpp
dom/media/MediaManager.cpp
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamGraph.h
dom/media/tests/mochitest/test_getUserMedia_constraints.html
dom/media/webrtc/MediaEngine.h
dom/media/webrtc/MediaEngineWebRTC.cpp
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/MediaTrackConstraints.cpp
dom/media/webrtc/MediaTrackConstraints.h
modules/libpref/init/all.js
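The constraint wired up below is exercised from content JS roughly as in the following sketch; this is illustrative only and not part of the patch.

// Illustrative sketch, not part of the patch: request stereo capture via the
// new channelCount constraint and read back the value that was applied.
navigator.mediaDevices.getUserMedia({ audio: { channelCount: { ideal: 2 } } })
  .then(stream => {
    const [track] = stream.getAudioTracks();
    // getSettings() reports the channel count actually applied, which this
    // patch clamps to the device's maximum.
    console.log("capturing", track.getSettings().channelCount, "channel(s)");
    track.stop();
  })
  .catch(e => console.error(e.name));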
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -670,20 +670,19 @@ AudioCallbackDriver::Init()
 
   input = output;
   input.channels = mInputChannels;
   input.layout = CUBEB_LAYOUT_UNDEFINED;
 
 #ifdef MOZ_WEBRTC
   if (mGraphImpl->mInputWanted) {
     StaticMutexAutoLock lock(AudioInputCubeb::Mutex());
-    uint32_t maxInputChannels = 0;
-    if (AudioInputCubeb::GetDeviceMaxChannels(mGraphImpl->mInputDeviceID, maxInputChannels) == 0) {
-      input.channels = mInputChannels = maxInputChannels;
-    }
+    uint32_t userChannels = 0;
+    AudioInputCubeb::GetUserChannelCount(mGraphImpl->mInputDeviceID, userChannels);
+    input.channels = mInputChannels = userChannels;
   }
 #endif
 
   cubeb_stream* stream = nullptr;
   CubebUtils::AudioDeviceID input_id = nullptr, output_id = nullptr;
   // We have to translate the deviceID values to cubeb devid's since those can be
   // freed whenever enumerate is called.
   {
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -1810,32 +1810,35 @@ MediaManager::MediaManager()
   mPrefs.mNoise        = webrtc::kNsUnchanged;
 #else
   mPrefs.mAec          = 0;
   mPrefs.mAgc          = 0;
   mPrefs.mNoise        = 0;
 #endif
   mPrefs.mPlayoutDelay = 0;
   mPrefs.mFullDuplex = false;
+  mPrefs.mChannels     = 0; // 0 means use the device's max channel count
   nsresult rv;
   nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv);
   if (NS_SUCCEEDED(rv)) {
     nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);
     if (branch) {
       GetPrefs(branch, nullptr);
     }
   }
   LOG(("%s: default prefs: %dx%d @%dfps (min %d), %dHz test tones, aec: %s,"
        "agc: %s, noise: %s, aec level: %d, agc level: %d, noise level: %d,"
-       "playout delay: %d, %sfull_duplex, extended aec %s, delay_agnostic %s",
+       "playout delay: %d, %sfull_duplex, extended aec %s, delay_agnostic %s "
+       "channels %d",
        __FUNCTION__, mPrefs.mWidth, mPrefs.mHeight,
        mPrefs.mFPS, mPrefs.mMinFPS, mPrefs.mFreq, mPrefs.mAecOn ? "on" : "off",
        mPrefs.mAgcOn ? "on": "off", mPrefs.mNoiseOn ? "on": "off", mPrefs.mAec,
        mPrefs.mAgc, mPrefs.mNoise, mPrefs.mPlayoutDelay, mPrefs.mFullDuplex ? "" : "not ",
-       mPrefs.mExtendedFilter ? "on" : "off", mPrefs.mDelayAgnostic ? "on" : "off"));
+       mPrefs.mExtendedFilter ? "on" : "off", mPrefs.mDelayAgnostic ? "on" : "off",
+       mPrefs.mChannels));
 }
 
 NS_IMPL_ISUPPORTS(MediaManager, nsIMediaManagerService, nsIObserver)
 
 /* static */ StaticRefPtr<MediaManager> MediaManager::sSingleton;
 
 #ifdef DEBUG
 /* static */ bool
@@ -1899,16 +1902,17 @@ MediaManager::Get() {
       prefs->AddObserver("media.getusermedia.aec_enabled", sSingleton, false);
       prefs->AddObserver("media.getusermedia.aec", sSingleton, false);
       prefs->AddObserver("media.getusermedia.agc_enabled", sSingleton, false);
       prefs->AddObserver("media.getusermedia.agc", sSingleton, false);
       prefs->AddObserver("media.getusermedia.noise_enabled", sSingleton, false);
       prefs->AddObserver("media.getusermedia.noise", sSingleton, false);
       prefs->AddObserver("media.getusermedia.playout_delay", sSingleton, false);
       prefs->AddObserver("media.ondevicechange.fakeDeviceChangeEvent.enabled", sSingleton, false);
+      prefs->AddObserver("media.getusermedia.channels", sSingleton, false);
 #endif
     }
 
     // Prepare async shutdown
 
     nsCOMPtr<nsIAsyncShutdownClient> shutdownPhase = GetShutdownPhase();
 
     class Blocker : public media::ShutdownBlocker
@@ -2922,16 +2926,17 @@ MediaManager::GetPrefs(nsIPrefBranch *aB
   GetPrefBool(aBranch, "media.getusermedia.agc_enabled", aData, &mPrefs.mAgcOn);
   GetPrefBool(aBranch, "media.getusermedia.noise_enabled", aData, &mPrefs.mNoiseOn);
   GetPref(aBranch, "media.getusermedia.aec", aData, &mPrefs.mAec);
   GetPref(aBranch, "media.getusermedia.agc", aData, &mPrefs.mAgc);
   GetPref(aBranch, "media.getusermedia.noise", aData, &mPrefs.mNoise);
   GetPref(aBranch, "media.getusermedia.playout_delay", aData, &mPrefs.mPlayoutDelay);
   GetPrefBool(aBranch, "media.getusermedia.aec_extended_filter", aData, &mPrefs.mExtendedFilter);
   GetPrefBool(aBranch, "media.getusermedia.aec_aec_delay_agnostic", aData, &mPrefs.mDelayAgnostic);
+  GetPref(aBranch, "media.getusermedia.channels", aData, &mPrefs.mChannels);
   GetPrefBool(aBranch, "media.ondevicechange.fakeDeviceChangeEvent.enabled", aData, &mPrefs.mFakeDeviceChangeEventOn);
 #endif
   GetPrefBool(aBranch, "media.navigator.audio.full_duplex", aData, &mPrefs.mFullDuplex);
 }
 
 void
 MediaManager::Shutdown()
 {
@@ -2960,16 +2965,17 @@ MediaManager::Shutdown()
     prefs->RemoveObserver("media.getusermedia.aec_enabled", this);
     prefs->RemoveObserver("media.getusermedia.aec", this);
     prefs->RemoveObserver("media.getusermedia.agc_enabled", this);
     prefs->RemoveObserver("media.getusermedia.agc", this);
     prefs->RemoveObserver("media.getusermedia.noise_enabled", this);
     prefs->RemoveObserver("media.getusermedia.noise", this);
     prefs->RemoveObserver("media.getusermedia.playout_delay", this);
     prefs->RemoveObserver("media.ondevicechange.fakeDeviceChangeEvent.enabled", this);
+    prefs->RemoveObserver("media.getusermedia.channels", this);
 #endif
     prefs->RemoveObserver("media.navigator.audio.full_duplex", this);
   }
 
   // Close off any remaining active windows.
   GetActiveWindows()->Clear();
   mActiveCallbacks.Clear();
   mCallIds.Clear();
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -3176,16 +3176,32 @@ SourceMediaStream::HasPendingAudioTrack(
       audioTrackPresent = true;
       break;
     }
   }
 
   return audioTrackPresent;
 }
 
+bool
+SourceMediaStream::OpenNewAudioCallbackDriver(AudioDataListener * aListener)
+{
+  MOZ_ASSERT(GraphImpl()->mLifecycleState ==
+      MediaStreamGraphImpl::LifecycleState::LIFECYCLE_RUNNING);
+  AudioCallbackDriver* nextDriver = new AudioCallbackDriver(GraphImpl());
+  nextDriver->SetInputListener(aListener);
+  {
+    MonitorAutoLock lock(GraphImpl()->GetMonitor());
+    GraphImpl()->CurrentDriver()->SwitchAtNextIteration(nextDriver);
+  }
+
+  return true;
+}
+
+
 void
 MediaInputPort::Init()
 {
   LOG(LogLevel::Debug,
       ("Adding MediaInputPort %p (from %p to %p) to the graph",
        this,
        mSource,
        mDest));
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -803,16 +803,18 @@ public:
    */
   bool HasPendingAudioTrack();
 
   TimeStamp GetStreamTracksStrartTimeStamp() {
     MutexAutoLock lock(mMutex);
     return mStreamTracksStartTimeStamp;
   }
 
+  bool OpenNewAudioCallbackDriver(AudioDataListener *aListener);
+
   // XXX need a Reset API
 
   friend class MediaStreamGraphImpl;
 
 protected:
   enum TrackCommands : uint32_t;
 
   virtual ~SourceMediaStream();
--- a/dom/media/tests/mochitest/test_getUserMedia_constraints.html
+++ b/dom/media/tests/mochitest/test_getUserMedia_constraints.html
@@ -69,17 +69,17 @@ var tests = [
     error: null },
   { message: "legacy facingMode ignored",
     constraints: { video: { mandatory: { facingMode: 'left' } } },
     error: null },
 ];
 
 var mustSupport = [
   'width', 'height', 'frameRate', 'facingMode', 'deviceId',
-  'echoCancellation', 'noiseSuppression', 'autoGainControl',
+  'echoCancellation', 'noiseSuppression', 'autoGainControl', 'channelCount',
 
   // Yet to add:
   //  'aspectRatio', 'volume', 'sampleRate', 'sampleSize', 'latency', 'groupId'
 
   // http://fluffy.github.io/w3c-screen-share/#screen-based-video-constraints
   // OBE by http://w3c.github.io/mediacapture-screen-share
   'mediaSource',
 
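A hedged sketch of how the new constraint could be covered by an entry in the `tests` array above; the entry shape follows the existing ones, but the specific values and expected error are assumptions and not part of this patch.

// Hypothetical extra test entry (assumption, not in this patch): an
// unsatisfiable exact channelCount should surface OverconstrainedError.
{ message: "channelCount of exactly 1000 should fail",
  constraints: { audio: { channelCount: { exact: 1000 } } },
  error: "OverconstrainedError" },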
--- a/dom/media/webrtc/MediaEngine.h
+++ b/dom/media/webrtc/MediaEngine.h
@@ -100,16 +100,17 @@ public:
     , mAec(0)
     , mAgc(0)
     , mNoise(0)
     , mPlayoutDelay(0)
     , mFullDuplex(false)
     , mExtendedFilter(false)
     , mDelayAgnostic(false)
     , mFakeDeviceChangeEventOn(false)
+    , mChannels(0)
   {}
 
   int32_t mWidth;
   int32_t mHeight;
   int32_t mFPS;
   int32_t mMinFPS;
   int32_t mFreq; // for test tones (fake:true)
   bool mAecOn;
@@ -118,16 +119,17 @@ public:
   int32_t mAec;
   int32_t mAgc;
   int32_t mNoise;
   int32_t mPlayoutDelay;
   bool mFullDuplex;
   bool mExtendedFilter;
   bool mDelayAgnostic;
   bool mFakeDeviceChangeEventOn;
+  int32_t mChannels;
 
   // mWidth and/or mHeight may be zero (=adaptive default), so use functions.
 
   int32_t GetWidth(bool aHD = false) const {
     return mWidth? mWidth : (mHeight?
                              (mHeight * GetDefWidth(aHD)) / GetDefHeight(aHD) :
                              GetDefWidth(aHD));
   }
--- a/dom/media/webrtc/MediaEngineWebRTC.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTC.cpp
@@ -36,16 +36,17 @@ namespace mozilla {
 
 // statics from AudioInputCubeb
 nsTArray<int>* AudioInputCubeb::mDeviceIndexes;
 int AudioInputCubeb::mDefaultDevice = -1;
 nsTArray<nsCString>* AudioInputCubeb::mDeviceNames;
 cubeb_device_collection AudioInputCubeb::mDevices = { nullptr, 0 };
 bool AudioInputCubeb::mAnyInUse = false;
 StaticMutex AudioInputCubeb::sMutex;
+uint32_t AudioInputCubeb::sUserChannelCount = 0;
 
 // AudioDeviceID is an annoying opaque value that's really a string
 // pointer, and is freed when the cubeb_device_collection is destroyed
 
 void AudioInputCubeb::UpdateDeviceList()
 {
   cubeb* cubebContext = CubebUtils::GetCubebContext();
   if (!cubebContext) {
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -147,20 +147,22 @@ public:
   // Threadsafe because it's referenced from an MicrophoneSource, which can
   // had references to it on other threads.
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioInput)
 
   virtual int GetNumOfRecordingDevices(int& aDevices) = 0;
   virtual int GetRecordingDeviceName(int aIndex, char (&aStrNameUTF8)[128],
                                      char aStrGuidUTF8[128]) = 0;
   virtual int GetRecordingDeviceStatus(bool& aIsAvailable) = 0;
-  virtual int GetChannelCount(int aDeviceIndex, uint32_t& aChannels) = 0;
+  virtual void GetChannelCount(uint32_t& aChannels) = 0;
+  virtual int GetMaxAvailableChannels(uint32_t& aChannels) = 0;
   virtual void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) = 0;
   virtual void StopRecording(SourceMediaStream *aStream) = 0;
   virtual int SetRecordingDevice(int aIndex) = 0;
+  virtual void SetUserChannelCount(uint32_t aChannels) = 0;
 
 protected:
   // Protected destructor, to discourage deletion outside of Release():
   virtual ~AudioInput() {}
 
   webrtc::VoiceEngine* mVoiceEngine;
 };
 
@@ -259,35 +261,57 @@ public:
   int GetRecordingDeviceStatus(bool& aIsAvailable)
   {
     // With cubeb, we only expose devices of type CUBEB_DEVICE_TYPE_INPUT,
     // so unless it was removed, say it's available
     aIsAvailable = true;
     return 0;
   }
 
-  int GetChannelCount(int aDeviceIndex, uint32_t& aChannels)
+  void GetChannelCount(uint32_t& aChannels)
+  {
+    GetUserChannelCount(mSelectedDevice, aChannels);
+  }
+
+  static void GetUserChannelCount(int aDeviceIndex, uint32_t& aChannels)
   {
-    return GetDeviceMaxChannels(aDeviceIndex, aChannels);
+    aChannels = sUserChannelCount;
+  }
+
+  int GetMaxAvailableChannels(uint32_t& aChannels)
+  {
+    return GetDeviceMaxChannels(mSelectedDevice, aChannels);
   }
 
   static int GetDeviceMaxChannels(int aDeviceIndex, uint32_t& aChannels)
   {
 #ifdef MOZ_WIDGET_ANDROID
     aChannels = 1;
 #else
     int32_t devindex = DeviceIndex(aDeviceIndex);
     if (mDevices.count == 0 || devindex < 0) {
       return 1;
     }
     aChannels = mDevices.device[devindex].max_channels;
 #endif
     return 0;
   }
 
+  void SetUserChannelCount(uint32_t aChannels)
+  {
+    if (GetDeviceMaxChannels(mSelectedDevice, sUserChannelCount)) {
+      sUserChannelCount = 1; // on error, fall back to mono capture
+      return;
+    }
+
+    if (aChannels && aChannels < sUserChannelCount) {
+      sUserChannelCount = aChannels;
+    }
+  }
+
   void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener)
   {
 #ifdef MOZ_WIDGET_ANDROID
     // OpenSL ES does not support enumerating devices.
     MOZ_ASSERT(mDevices.count == 0);
 #else
     MOZ_ASSERT(mDevices.count > 0);
 #endif
@@ -339,16 +363,17 @@ private:
 
   // pointers to avoid static constructors
   static nsTArray<int>* mDeviceIndexes;
   static int mDefaultDevice; // -1 == not set
   static nsTArray<nsCString>* mDeviceNames;
   static cubeb_device_collection mDevices;
   static bool mAnyInUse;
   static StaticMutex sMutex;
+  static uint32_t sUserChannelCount;
 };
 
 class AudioInputWebRTC final : public AudioInput
 {
 public:
   explicit AudioInputWebRTC(webrtc::VoiceEngine* aVoiceEngine) : AudioInput(aVoiceEngine) {}
 
   int GetNumOfRecordingDevices(int& aDevices)
@@ -379,22 +404,30 @@ public:
     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
     if (!ptrVoEHw)  {
       return 1;
     }
     ptrVoEHw->GetRecordingDeviceStatus(aIsAvailable);
     return 0;
   }
 
-  int GetChannelCount(int aDeviceIndex, uint32_t& aChannels)
+  void GetChannelCount(uint32_t& aChannels)
   {
     aChannels = 1; // default to mono
+  }
+
+  int GetMaxAvailableChannels(uint32_t& aChannels)
+  {
+    aChannels = 1;
     return 0;
   }
 
+  void SetUserChannelCount(uint32_t aChannels)
+  {}
+
   void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) {}
   void StopRecording(SourceMediaStream *aStream) {}
 
   int SetRecordingDevice(int aIndex)
   {
     ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
     if (!ptrVoEHw)  {
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -212,16 +212,17 @@ MediaEngineWebRTCMicrophoneSource::Media
   MOZ_ASSERT(aVoiceEnginePtr);
   MOZ_ASSERT(aAudioInput);
   mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
   mDeviceUUID.Assign(uuid);
   mListener = new mozilla::WebRTCAudioDataListener(this);
   mSettings.mEchoCancellation.Construct(0);
   mSettings.mAutoGainControl.Construct(0);
   mSettings.mNoiseSuppression.Construct(0);
+  mSettings.mChannelCount.Construct(0);
   // We'll init lazily as needed
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::GetName(nsAString& aName) const
 {
   aName.Assign(mDeviceName);
   return;
@@ -283,53 +284,99 @@ MediaEngineWebRTCMicrophoneSource::Updat
     const char** aOutBadConstraint)
 {
   FlattenedConstraints c(aNetConstraints);
 
   MediaEnginePrefs prefs = aPrefs;
   prefs.mAecOn = c.mEchoCancellation.Get(prefs.mAecOn);
   prefs.mAgcOn = c.mAutoGainControl.Get(prefs.mAgcOn);
   prefs.mNoiseOn = c.mNoiseSuppression.Get(prefs.mNoiseOn);
+  uint32_t maxChannels = 1;
+  if (mAudioInput->GetMaxAvailableChannels(maxChannels) != 0) {
+    return NS_ERROR_FAILURE;
+  }
+  // Check channelCount violation
+  if (static_cast<int32_t>(maxChannels) < c.mChannelCount.mMin ||
+      static_cast<int32_t>(maxChannels) > c.mChannelCount.mMax) {
+    *aOutBadConstraint = "channelCount";
+    return NS_ERROR_FAILURE;
+  }
+  // A pref of 0 (or less) means use the device's maximum channel count
+  if (prefs.mChannels <= 0) {
+    prefs.mChannels = static_cast<int32_t>(maxChannels);
+  }
+  prefs.mChannels = c.mChannelCount.Get(std::min(prefs.mChannels,
+                                        static_cast<int32_t>(maxChannels)));
+  // Clamp channelCount to a valid value
+  prefs.mChannels = std::max(1, std::min(prefs.mChannels, static_cast<int32_t>(maxChannels)));
 
-  LOG(("Audio config: aec: %d, agc: %d, noise: %d, delay: %d",
-       prefs.mAecOn ? prefs.mAec : -1,
-       prefs.mAgcOn ? prefs.mAgc : -1,
-       prefs.mNoiseOn ? prefs.mNoise : -1,
-       prefs.mPlayoutDelay));
+  LOG(("Audio config: aec: %d, agc: %d, noise: %d, delay: %d, channels: %d",
+      prefs.mAecOn ? prefs.mAec : -1,
+      prefs.mAgcOn ? prefs.mAgc : -1,
+      prefs.mNoiseOn ? prefs.mNoise : -1,
+      prefs.mPlayoutDelay,
+      prefs.mChannels));
 
   mPlayoutDelay = prefs.mPlayoutDelay;
 
   switch (mState) {
     case kReleased:
       MOZ_ASSERT(aHandle);
       if (sChannelsOpen == 0) {
         if (!InitEngine()) {
           LOG(("Audio engine is not initalized"));
           return NS_ERROR_FAILURE;
         }
       } else {
         // Until we fix (or wallpaper) support for multiple mic input
         // (Bug 1238038) fail allocation for a second device
         return NS_ERROR_FAILURE;
       }
+      if (mAudioInput->SetRecordingDevice(mCapIndex)) {
+        return NS_ERROR_FAILURE;
+      }
+      mAudioInput->SetUserChannelCount(prefs.mChannels);
       if (!AllocChannel()) {
+        FreeChannel();
         LOG(("Audio device is not initalized"));
         return NS_ERROR_FAILURE;
       }
-      if (mAudioInput->SetRecordingDevice(mCapIndex)) {
-        FreeChannel();
-        return NS_ERROR_FAILURE;
+      LOG(("Audio device %d allocated", mCapIndex));
+      {
+        // Update with the actual applied channelCount in order
+        // to store it in settings.
+        uint32_t channelCount = 0;
+        mAudioInput->GetChannelCount(channelCount);
+        MOZ_ASSERT(channelCount > 0);
+        prefs.mChannels = channelCount;
       }
-      LOG(("Audio device %d allocated", mCapIndex));
       break;
 
     case kStarted:
       if (prefs == mLastPrefs) {
         return NS_OK;
       }
+
+      if (prefs.mChannels != mLastPrefs.mChannels) {
+        MOZ_ASSERT(mSources.Length() > 0);
+        auto& source = mSources.LastElement();
+        mAudioInput->SetUserChannelCount(prefs.mChannels);
+        // Get the validated channel count
+        uint32_t channelCount = 0;
+        mAudioInput->GetChannelCount(channelCount);
+        MOZ_ASSERT(channelCount > 0 && mLastPrefs.mChannels > 0);
+        // If the validated channel count changed, open a new audio callback driver
+        if (static_cast<uint32_t>(mLastPrefs.mChannels) != channelCount
+            && !source->OpenNewAudioCallbackDriver(mListener)) {
+          return NS_ERROR_FAILURE;
+        }
+        // Update settings
+        prefs.mChannels = channelCount;
+      }
+
       if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
         MonitorAutoLock lock(mMonitor);
         if (mSources.IsEmpty()) {
           LOG(("Audio device %d reallocated", mCapIndex));
         } else {
           LOG(("Audio device %d allocated shared", mCapIndex));
         }
       }
@@ -379,16 +426,17 @@ MediaEngineWebRTCMicrophoneSource::SetLa
   mLastPrefs = aPrefs;
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
 
   NS_DispatchToMainThread(media::NewRunnableFrom([that, aPrefs]() mutable {
     that->mSettings.mEchoCancellation.Value() = aPrefs.mAecOn;
     that->mSettings.mAutoGainControl.Value() = aPrefs.mAgcOn;
     that->mSettings.mNoiseSuppression.Value() = aPrefs.mNoiseOn;
+    that->mSettings.mChannelCount.Value() = aPrefs.mChannels;
     return NS_OK;
   }));
 }
 
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Deallocate(AllocationHandle* aHandle)
 {
@@ -791,19 +839,19 @@ MediaEngineWebRTCMicrophoneSource::Alloc
 #endif // MOZ_B2G
 
         // Set "codec" to PCM, 32kHz on device's channels
         ScopedCustomReleasePtr<webrtc::VoECodec> ptrVoECodec(webrtc::VoECodec::GetInterface(mVoiceEngine));
         if (ptrVoECodec) {
           webrtc::CodecInst codec;
           strcpy(codec.plname, ENCODING);
           codec.channels = CHANNELS;
-          uint32_t channels = 0;
-          if (mAudioInput->GetChannelCount(mCapIndex, channels) == 0) {
-            codec.channels = channels;
+          uint32_t maxChannels = 0;
+          if (mAudioInput->GetMaxAvailableChannels(maxChannels) == 0) {
+            codec.channels = maxChannels;
           }
           MOZ_ASSERT(mSampleFrequency == 16000 || mSampleFrequency == 32000);
           codec.rate = SAMPLE_RATE(mSampleFrequency);
           codec.plfreq = mSampleFrequency;
           codec.pacsize = SAMPLE_LENGTH(mSampleFrequency);
           codec.pltype = 0; // Default payload type
 
           if (!ptrVoECodec->SetSendCodec(mChannel, codec)) {
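The kStarted branch added above reacts to a changed channelCount on a live source by re-validating the request and, when the applied count changes, switching to a new AudioCallbackDriver. From content, that path would be reached roughly as in the sketch below (an assumption for illustration, not part of the patch).

// Illustrative sketch (assumption, not part of the patch): reconfiguring a
// live microphone track; a changed validated channel count makes the source
// open a new audio callback driver via OpenNewAudioCallbackDriver().
const [track] = stream.getAudioTracks();
track.applyConstraints({ channelCount: 1 })
  .then(() => console.log("now capturing",
                          track.getSettings().channelCount, "channel(s)"))
  .catch(e => console.error("constraint rejected:", e.name));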
--- a/dom/media/webrtc/MediaTrackConstraints.cpp
+++ b/dom/media/webrtc/MediaTrackConstraints.cpp
@@ -373,16 +373,19 @@ FlattenedConstraints::FlattenedConstrain
         mEchoCancellation.Intersect(set.mEchoCancellation);
     }
     if (mNoiseSuppression.Intersects(set.mNoiseSuppression)) {
         mNoiseSuppression.Intersect(set.mNoiseSuppression);
     }
     if (mAutoGainControl.Intersects(set.mAutoGainControl)) {
         mAutoGainControl.Intersect(set.mAutoGainControl);
     }
+    if (mChannelCount.Intersects(set.mChannelCount)) {
+        mChannelCount.Intersect(set.mChannelCount);
+    }
   }
 }
 
 // MediaEngine helper
 //
 // The full algorithm for all devices. Sources that don't list capabilities
 // need to fake it and hardcode some by populating mHardcodedCapabilities above.
 //
--- a/dom/media/webrtc/MediaTrackConstraints.h
+++ b/dom/media/webrtc/MediaTrackConstraints.h
@@ -221,16 +221,17 @@ public:
   DoubleRange mFrameRate;
   StringRange mFacingMode;
   StringRange mMediaSource;
   LongLongRange mBrowserWindow;
   BooleanRange mScrollWithPage;
   StringRange mDeviceId;
   LongRange mViewportOffsetX, mViewportOffsetY, mViewportWidth, mViewportHeight;
   BooleanRange mEchoCancellation, mNoiseSuppression, mAutoGainControl;
+  LongRange mChannelCount;
 private:
   typedef NormalizedConstraintSet T;
 public:
   NormalizedConstraintSet(const dom::MediaTrackConstraintSet& aOther,
                           bool advanced,
                           nsTArray<MemberPtrType>* aList = nullptr)
   : mWidth(&T::mWidth, "width", aOther.mWidth, advanced, aList)
   , mHeight(&T::mHeight, "height", aOther.mHeight, advanced, aList)
@@ -253,17 +254,19 @@ public:
   , mViewportHeight(&T::mViewportHeight, "viewportHeight",
                     aOther.mViewportHeight, advanced, aList)
   , mEchoCancellation(&T::mEchoCancellation, "echoCancellation",
                       aOther.mEchoCancellation, advanced, aList)
   , mNoiseSuppression(&T::mNoiseSuppression, "noiseSuppression",
                       aOther.mNoiseSuppression,
                       advanced, aList)
   , mAutoGainControl(&T::mAutoGainControl, "autoGainControl",
-                     aOther.mAutoGainControl, advanced, aList) {}
+                     aOther.mAutoGainControl, advanced, aList)
+  , mChannelCount(&T::mChannelCount, "channelCount",
+                  aOther.mChannelCount, advanced, aList) {}
 };
 
 template<> bool NormalizedConstraintSet::Range<bool>::Merge(const Range& aOther);
 template<> void NormalizedConstraintSet::Range<bool>::FinalizeMerge();
 
 // Used instead of MediaTrackConstraints in lower-level code.
 struct NormalizedConstraints : public NormalizedConstraintSet
 {
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -483,16 +483,17 @@ pref("media.navigator.video.max_fs", 122
 pref("media.navigator.video.max_fr", 60);
 pref("media.navigator.video.h264.level", 31); // 0x42E01f - level 3.1
 pref("media.navigator.video.h264.max_br", 0);
 pref("media.navigator.video.h264.max_mbps", 0);
 pref("media.peerconnection.video.h264_enabled", false);
 pref("media.peerconnection.video.vp9_enabled", true);
 pref("media.getusermedia.aec", 1);
 pref("media.getusermedia.browser.enabled", false);
+pref("media.getusermedia.channels", 0);
 // Desktop is typically VGA capture or more; and qm_select will not drop resolution
 // below 1/2 in each dimension (or so), so QVGA (320x200) is the lowest here usually.
 pref("media.peerconnection.video.min_bitrate", 0);
 pref("media.peerconnection.video.start_bitrate", 0);
 pref("media.peerconnection.video.max_bitrate", 0);
 pref("media.peerconnection.video.min_bitrate_estimate", 0);
 pref("media.peerconnection.video.denoising", false);
 pref("media.navigator.audio.fake_frequency", 1000);