Bug 1237816: count open input sources for MediaStreams to release inputs on Destroy() r?roc,padenot draft
author Randell Jesup <rjesup@jesup.org>
Wed, 03 Feb 2016 21:12:51 -0500
changeset 328904 c02f55bdbf1a2546269fd5ac630514dcfee0e28e
parent 328605 271f1c468692223aecd0562c58f7f2ef07dccc0a
child 513871 38d3da316c3de49709a365e8086c75e444aef574
push id 10429
push user rjesup@wgate.com
push date Thu, 04 Feb 2016 13:46:35 +0000
reviewersroc, padenot
bugs 1237816
milestone 47.0a1
Bug 1237816: count open input sources for MediaStreams to release inputs on Destroy() r?roc,padenot
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamGraph.h
dom/media/MediaStreamGraphImpl.h
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
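
The heart of the change is a per-listener use count kept by MediaStreamGraphImpl (mInputDeviceUsers): OpenAudioInputImpl() increments it and only registers the listener on the first open, and CloseAudioInputImpl() decrements it and only tears the input down when the count reaches zero. As a rough standalone sketch of that bookkeeping (not code from the patch: std::unordered_map stands in for nsDataHashtable, and Listener/InputUseCounts are illustrative names):

// Illustrative only: the real bookkeeping lives in
// MediaStreamGraphImpl::OpenAudioInputImpl()/CloseAudioInputImpl().
#include <cassert>
#include <cstdint>
#include <unordered_map>

struct Listener {};  // stand-in for AudioDataListener; the patch keys on the
                     // listener rather than the (opaque) cubeb device ID

class InputUseCounts {
public:
  // Returns true when this is the first user, i.e. the input must be opened.
  bool Acquire(Listener* aListener) {
    uint32_t& count = mCounts[aListener];  // creates the entry at 0 if missing
    return ++count == 1;
  }
  // Returns true when the last user went away, i.e. the input can be closed.
  bool Release(Listener* aListener) {
    auto it = mCounts.find(aListener);
    assert(it != mCounts.end() && "Release() without a matching Acquire()");
    if (--it->second > 0) {
      return false;  // still in use by another stream
    }
    mCounts.erase(it);
    return true;
  }
private:
  std::unordered_map<Listener*, uint32_t> mCounts;
};

int main() {
  InputUseCounts counts;
  Listener mic;
  bool openInput = counts.Acquire(&mic);  // first stream: open the cubeb input
  bool openAgain = counts.Acquire(&mic);  // second stream: reuse the open input
  bool closeYet  = counts.Release(&mic);  // one stream destroyed: input stays open
  bool closeNow  = counts.Release(&mic);  // last stream destroyed: close the input
  return (openInput && !openAgain && !closeYet && closeNow) ? 0 : 1;
}
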
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -922,28 +922,41 @@ MediaStreamGraphImpl::PlayVideo(MediaStr
     aStream->mLastPlayedVideoFrame.SetNull();
   }
 }
 
 void
 MediaStreamGraphImpl::OpenAudioInputImpl(CubebUtils::AudioDeviceID aID,
                                          AudioDataListener *aListener)
 {
- // Bug 1238038 Need support for multiple mics at once
-  MOZ_ASSERT(!mInputWanted);
-  if (mInputWanted) {
+  // Bug 1238038 Need support for multiple mics at once
+  if (mInputDeviceUsers.Count() > 0 &&
+      !mInputDeviceUsers.Get(aListener, nullptr)) {
+    NS_ASSERTION(false, "Input from multiple mics not yet supported; bug 1238038");
     // Need to support separate input-only AudioCallback drivers; they'll
     // call us back on "other" threads.  We will need to echo-cancel them, though.
     return;
   }
   mInputWanted = true;
+
+  // Add to count of users for this ID.
+  // XXX Since we can't rely on IDs staying valid (ugh), use the listener as
+  // a stand-in for the ID.  Fix as part of support for multiple captures
+  // (Bug 1238038)
+  uint32_t count = 0;
+  mInputDeviceUsers.Get(aListener, &count); // ok if this fails
+  count++;
+  mInputDeviceUsers.Put(aListener, count); // creates a new entry in the hash if needed
+
   // aID is a cubeb_devid, and we assume that opaque ptr is valid until
   // we close cubeb.
   mInputDeviceID = aID;
-  mAudioInputs.AppendElement(aListener); // always monitor speaker data
+  if (count == 1) { // first open for this listener
+    mAudioInputs.AppendElement(aListener); // always monitor speaker data
+  }
 
   // Switch Drivers since we're adding input (to input-only or full-duplex)
   MonitorAutoLock mon(mMonitor);
   if (mLifecycleState == LIFECYCLE_RUNNING) {
     AudioCallbackDriver* driver = new AudioCallbackDriver(this);
     CurrentDriver()->SwitchAtNextIteration(driver);
   }
 }
@@ -969,23 +982,33 @@ MediaStreamGraphImpl::OpenAudioInput(Cub
       mGraph->OpenAudioInputImpl(mID, mListener);
     }
     MediaStreamGraphImpl *mGraph;
     // aID is a cubeb_devid, and we assume that opaque ptr is valid until
     // we close cubeb.
     CubebUtils::AudioDeviceID mID;
     RefPtr<AudioDataListener> mListener;
   };
-  this->AppendMessage(new Message(this, aID, aListener));
+  if (!IsDestroyed()) {
+    this->AppendMessage(new Message(this, aID, aListener));
+  }
   return NS_OK;
 }
 
 void
 MediaStreamGraphImpl::CloseAudioInputImpl(AudioDataListener *aListener)
 {
+  uint32_t count = 0;
+  DebugOnly<bool> result = mInputDeviceUsers.Get(aListener, &count);
+  MOZ_ASSERT(result);
+  if (--count > 0) {
+    mInputDeviceUsers.Put(aListener, count);
+    return; // still in use
+  }
+  mInputDeviceUsers.Remove(aListener);
   mInputDeviceID = nullptr;
   mInputWanted = false;
   CurrentDriver()->RemoveInputListener(aListener);
   mAudioInputs.RemoveElement(aListener);
 
   // Switch Drivers since we're adding or removing an input (to nothing/system or output only)
   bool audioTrackPresent = false;
   for (uint32_t i = 0; i < mStreams.Length(); ++i) {
@@ -2280,19 +2303,42 @@ MediaStream::AddMainThreadListener(MainT
 
     RefPtr<MediaStream> mStream;
   };
 
   RefPtr<nsRunnable> runnable = new NotifyRunnable(this);
   NS_WARN_IF(NS_FAILED(NS_DispatchToMainThread(runnable.forget())));
 }
 
+nsresult
+SourceMediaStream::OpenAudioInput(CubebUtils::AudioDeviceID aID,
+                                  AudioDataListener *aListener)
+{
+  if (GraphImpl()) {
+    mInputListener = aListener;
+    return GraphImpl()->OpenAudioInput(aID, aListener);
+  }
+  return NS_ERROR_FAILURE;
+}
+
+void
+SourceMediaStream::CloseAudioInput()
+{
+  // Destroy() may have run already and cleared this
+  if (GraphImpl() && mInputListener) {
+    GraphImpl()->CloseAudioInput(mInputListener);
+  }
+  mInputListener = nullptr;
+}
+
 void
 SourceMediaStream::DestroyImpl()
 {
+  CloseAudioInput();
+
   // Hold mMutex while mGraph is reset so that other threads holding mMutex
   // can null-check it and know that the graph will not be destroyed.
   MutexAutoLock lock(mMutex);
   MediaStream::DestroyImpl();
 }
 
 void
 SourceMediaStream::SetPullEnabled(bool aEnabled)
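
For producers, the intended pattern is now to open and close the input through the SourceMediaStream rather than talking to the graph directly. A hedged sketch of that call pattern follows; HypotheticalMicProducer is not a class in this patch, and the snippet assumes the in-tree types (SourceMediaStream, AudioDataListener, CubebUtils::AudioDeviceID) rather than being standalone:

// Hypothetical producer class; only the call order is the point.
nsresult
HypotheticalMicProducer::Start(CubebUtils::AudioDeviceID aID)
{
  // mStream is a RefPtr<SourceMediaStream>, mListener a RefPtr<AudioDataListener>.
  // The stream remembers mListener and bumps the graph's use count for it.
  return mStream->OpenAudioInput(aID, mListener);
}

void
HypotheticalMicProducer::Stop()
{
  // Safe even if Destroy() already ran: CloseAudioInput() null-checks the
  // graph and the stored listener before telling the graph.
  mStream->CloseAudioInput();
}

void
HypotheticalMicProducer::Shutdown()
{
  // No explicit CloseAudioInput() is needed here: Destroy() reaches
  // DestroyImpl(), which closes the input and drops the use count.
  mStream->Destroy();
}
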
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -729,16 +729,26 @@ public:
     mPullEnabled(false),
     mUpdateFinished(false),
     mNeedsMixing(false)
   {}
 
   SourceMediaStream* AsSourceStream() override { return this; }
 
   // Media graph thread only
+
+  // Users of audio inputs go through the stream so it can track when the
+  // last stream referencing an input goes away and then close the cubeb
+  // input.  Note: callable from any thread (though it bounces through
+  // MainThread to set the command if needed).
+  nsresult OpenAudioInput(CubebUtils::AudioDeviceID aID,
+                          AudioDataListener *aListener);
+  // Note: also implied when Destroy() happens
+  void CloseAudioInput();
+
   void DestroyImpl() override;
 
   // Call these on any thread.
   /**
    * Enable or disable pulling. When pulling is enabled, NotifyPull
    * gets called on MediaStreamListeners for this stream during the
    * MediaStreamGraph control loop. Pulling is initially disabled.
    * Due to unavoidable race conditions, after a call to SetPullEnabled(false)
@@ -915,16 +925,22 @@ protected:
    * Notify direct consumers of new data to one of the stream tracks.
    * The data doesn't have to be resampled (though it may be).  This is called
    * from AppendToTrack on the thread providing the data, and will call
    * the Listeners on this thread.
    */
   void NotifyDirectConsumers(TrackData *aTrack,
                              MediaSegment *aSegment);
 
+  // Only accessed on the MSG thread.  Used to ask the MSGImpl to usecount
+  // users of a specific input.
+  // XXX Should really be a CubebUtils::AudioDeviceID, but they aren't
+  // copyable (opaque pointers)
+  RefPtr<AudioDataListener> mInputListener;
+
   // This must be acquired *before* MediaStreamGraphImpl's lock, if they are
   // held together.
   Mutex mMutex;
   // protected by mMutex
   StreamTime mUpdateKnownTracksTime;
   nsTArray<TrackData> mUpdateTracks;
   nsTArray<TrackData> mPendingTracks;
   nsTArray<RefPtr<MediaStreamDirectListener> > mDirectListeners;
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -3,16 +3,18 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_MEDIASTREAMGRAPHIMPL_H_
 #define MOZILLA_MEDIASTREAMGRAPHIMPL_H_
 
 #include "MediaStreamGraph.h"
 
+#include "nsDataHashtable.h"
+
 #include "mozilla/Monitor.h"
 #include "mozilla/TimeStamp.h"
 #include "nsIMemoryReporter.h"
 #include "nsIThread.h"
 #include "nsIRunnable.h"
 #include "nsIAsyncShutdown.h"
 #include "Latency.h"
 #include "mozilla/WeakPtr.h"
@@ -623,16 +625,19 @@ public:
   /**
    * Devices to use for cubeb input & output, or NULL for no input (void*),
    * and boolean to control if we want input/output
    */
   bool mInputWanted;
   CubebUtils::AudioDeviceID mInputDeviceID;
   bool mOutputWanted;
   CubebUtils::AudioDeviceID mOutputDeviceID;
+  // Maps AudioDataListeners to a usecount of streams using the listener
+  // so we can know when it's no longer in use.
+  nsDataHashtable<nsPtrHashKey<AudioDataListener>, uint32_t> mInputDeviceUsers;
 
   // True if the graph needs another iteration after the current iteration.
   Atomic<bool> mNeedAnotherIteration;
   // GraphDriver may need a WakeUp() if something changes
   Atomic<bool> mGraphDriverAsleep;
 
   // mMonitor guards the data below.
   // MediaStreamGraph normally does its work without holding mMonitor, so it is
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -133,32 +133,32 @@ public:
   // Threadsafe because it's referenced from a MicrophoneSource, which can
   // have references to it on other threads.
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioInput)
 
   virtual int GetNumOfRecordingDevices(int& aDevices) = 0;
   virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
                                      char aStrGuidUTF8[128]) = 0;
   virtual int GetRecordingDeviceStatus(bool& aIsAvailable) = 0;
-  virtual void StartRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) = 0;
-  virtual void StopRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) = 0;
+  virtual void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) = 0;
+  virtual void StopRecording(SourceMediaStream *aStream) = 0;
   virtual int SetRecordingDevice(int aIndex) = 0;
 
 protected:
   // Protected destructor, to discourage deletion outside of Release():
   virtual ~AudioInput() {}
 
   webrtc::VoiceEngine* mVoiceEngine;
 };
 
 class AudioInputCubeb final : public AudioInput
 {
 public:
   explicit AudioInputCubeb(webrtc::VoiceEngine* aVoiceEngine, int aIndex = 0) :
-    AudioInput(aVoiceEngine), mSelectedDevice(aIndex), mInUse(false)
+    AudioInput(aVoiceEngine), mSelectedDevice(aIndex), mInUseCount(0)
   {
     if (!mDeviceIndexes) {
       mDeviceIndexes = new nsTArray<int>;
       mDeviceNames = new nsTArray<nsCString>;
     }
   }
 
   static void CleanupGlobalData()
@@ -209,50 +209,54 @@ public:
   int GetRecordingDeviceStatus(bool& aIsAvailable)
   {
     // With cubeb, we only expose devices of type CUBEB_DEVICE_TYPE_INPUT,
     // so unless it was removed, say it's available
     aIsAvailable = true;
     return 0;
   }
 
-  void StartRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener)
+  void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener)
   {
     MOZ_ASSERT(mDevices);
 
-    ScopedCustomReleasePtr<webrtc::VoEExternalMedia> ptrVoERender;
-    ptrVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
-    if (ptrVoERender) {
-      ptrVoERender->SetExternalRecordingStatus(true);
+    if (mInUseCount == 0) {
+      ScopedCustomReleasePtr<webrtc::VoEExternalMedia> ptrVoERender;
+      ptrVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
+      if (ptrVoERender) {
+        ptrVoERender->SetExternalRecordingStatus(true);
+      }
+      mAnyInUse = true;
     }
-    aGraph->OpenAudioInput(mDevices->device[mSelectedDevice]->devid, aListener);
-    mInUse = true;
-    mAnyInUse = true;
+    mInUseCount++;
+    // Always tell the stream we're using it for input
+    aStream->OpenAudioInput(mDevices->device[mSelectedDevice]->devid, aListener);
   }
 
-  void StopRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener)
+  void StopRecording(SourceMediaStream *aStream)
   {
-    aGraph->CloseAudioInput(aListener);
-    mInUse = false;
-    mAnyInUse = false;
+    aStream->CloseAudioInput();
+    if (--mInUseCount == 0) {
+      mAnyInUse = false;
+    }
   }
 
   int SetRecordingDevice(int aIndex)
   {
     int32_t devindex = DeviceIndex(aIndex);
     if (!mDevices || devindex < 0) {
       return 1;
     }
     mSelectedDevice = devindex;
     return 0;
   }
 
 protected:
   ~AudioInputCubeb() {
-    MOZ_RELEASE_ASSERT(!mInUse);
+    MOZ_RELEASE_ASSERT(mInUseCount == 0);
   }
 
 private:
   // It would be better to watch for device-change notifications
   void UpdateDeviceList()
   {
     cubeb_device_collection *devices = nullptr;
 
@@ -295,17 +299,17 @@ private:
 
   // We have an array, which consists of indexes to the current mDevices
   // list.  This is updated on mDevices updates.  Many devices in mDevices
   // won't be included in the array (wrong type, etc), or if a device is
   // removed it will map to -1 (and opens of this device will need to check
   // for this, and be careful of threading access).  The mappings need to
   // be updated on each re-enumeration.
   int mSelectedDevice;
-  bool mInUse; // for assertions about listener lifetime
+  uint32_t mInUseCount; // streams currently recording from this input
 
   // pointers to avoid static constructors
   static nsTArray<int>* mDeviceIndexes;
   static nsTArray<nsCString>* mDeviceNames;
   static cubeb_device_collection *mDevices;
   static bool mAnyInUse;
 };
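
AudioInputCubeb now keeps its own mInUseCount so that VoE external recording is configured only on the first StartRecording() and mAnyInUse is cleared only when the last StopRecording() runs, while every call still notifies the stream. A small self-contained sketch of that first-open/last-close idiom (FirstLastCounter and its callbacks are illustrative, not part of the patch):

#include <cassert>
#include <cstdint>
#include <functional>

// Edge-triggered use counter: run a side effect only on the 0 -> 1 and
// 1 -> 0 transitions, the way StartRecording()/StopRecording() now gate
// SetExternalRecordingStatus(true) and the mAnyInUse flag on mInUseCount.
class FirstLastCounter {
public:
  FirstLastCounter(std::function<void()> aOnFirst, std::function<void()> aOnLast)
    : mOnFirst(aOnFirst), mOnLast(aOnLast) {}

  void Start() {
    if (mCount++ == 0) {
      mOnFirst();  // first user: e.g. enable external recording
    }
  }
  void Stop() {
    assert(mCount > 0 && "Stop() without a matching Start()");
    if (--mCount == 0) {
      mOnLast();   // last user gone: e.g. clear mAnyInUse
    }
  }

private:
  uint32_t mCount = 0;
  std::function<void()> mOnFirst;
  std::function<void()> mOnLast;
};

int main() {
  bool anyInUse = false;
  FirstLastCounter recording([&] { anyInUse = true; },
                             [&] { anyInUse = false; });
  recording.Start();            // first consumer of the mic
  recording.Start();            // second consumer shares the same input
  recording.Stop();             // first consumer stops; input stays configured
  bool stillInUse = anyInUse;   // expected: true
  recording.Stop();             // last consumer stops
  return (stillInUse && !anyInUse) ? 0 : 1;
}
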
 
@@ -342,18 +346,18 @@ public:
     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
     if (!ptrVoEHw)  {
       return 1;
     }
     ptrVoEHw->GetRecordingDeviceStatus(aIsAvailable);
     return 0;
   }
 
-  void StartRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) {}
-  void StopRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) {}
+  void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) {}
+  void StopRecording(SourceMediaStream *aStream) {}
 
   int SetRecordingDevice(int aIndex)
   {
     ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
     if (!ptrVoEHw)  {
       return 1;
     }
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -353,16 +353,18 @@ MediaEngineWebRTCMicrophoneSource::Start
   aStream->AddAudioTrack(aID, mSampleFrequency, 0, segment, SourceMediaStream::ADDTRACK_QUEUED);
 
   // XXX Make this based on the pref.
   aStream->RegisterForAudioMixing();
   LOG(("Start audio for stream %p", aStream));
 
   if (mState == kStarted) {
     MOZ_ASSERT(aID == mTrackID);
+    // Make sure we're associated with this stream
+    mAudioInput->StartRecording(aStream, mListener);
     return NS_OK;
   }
   mState = kStarted;
   mTrackID = aID;
 
   // Make sure logger starts before capture
   AsyncLatencyLogger::Get(true);
 
@@ -396,29 +398,30 @@ MediaEngineWebRTCMicrophoneSource::Stop(
     if (!mSources.RemoveElement(aSource)) {
       // Already stopped - this is allowed
       return NS_OK;
     }
 
     aSource->EndTrack(aID);
 
     if (!mSources.IsEmpty()) {
+      mAudioInput->StopRecording(aSource);
       return NS_OK;
     }
     if (mState != kStarted) {
       return NS_ERROR_FAILURE;
     }
     if (!mVoEBase) {
       return NS_ERROR_FAILURE;
     }
 
     mState = kStopped;
   }
 
-  mAudioInput->StopRecording(aSource->Graph(), mListener);
+  mAudioInput->StopRecording(aSource);
 
   mVoERender->DeRegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel);
 
   if (mVoEBase->StopSend(mChannel)) {
     return NS_ERROR_FAILURE;
   }
   if (mVoEBase->StopReceive(mChannel)) {
     return NS_ERROR_FAILURE;