Bug 1397793 - Allow switching processing on/off dynamically. r?pehrsons draft
authorPaul Adenot <paul@paul.cx>
Thu, 23 Nov 2017 15:53:25 +0100
changeset 707566 16547d0ace0d0d7e791ef60249f94fe3060944b4
parent 707565 3fdab4238aeda44a12383d64941d0470e4c709ad
child 707567 f80414c0e9048ee5eb9754a0161844b5607addb8
push id 92158
push user paul@paul.cx
push date Tue, 05 Dec 2017 14:38:23 +0000
reviewers pehrsons
bugs 1397793
milestone 59.0a1
Bug 1397793 - Allow switching processing on/off dynamically. r?pehrsons MozReview-Commit-ID: G0NJRkKEVeM *** fold
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -503,22 +503,22 @@ private:
   void PacketizeAndProcess(MediaStreamGraph* aGraph,
                            const AudioDataValue* aBuffer,
                            size_t aFrames,
                            TrackRate aRate,
                            uint32_t aChannels);
 
   RefPtr<mozilla::AudioInput> mAudioInput;
   RefPtr<WebRTCAudioDataListener> mListener;
-  RefPtr<AudioOutputObserver> mAudioOutputObserver;
 
   // Note: shared across all microphone sources
   static int sChannelsOpen;
 
   const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
+  const RefPtr<AudioOutputObserver> mAudioOutputObserver;
 
   // accessed from the GraphDriver thread except for deletion
   nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizer;
   ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERenderListener;
 
   // mMonitor protects mSources[] and mPrincipalHandles[] access/changes, and
   // transitions of mState from kStarted to kStopped (which are combined with
   // EndTrack()). mSources[] and mPrincipalHandles[] are accessed from webrtc
@@ -538,17 +538,17 @@ private:
 
   int32_t mSampleFrequency;
   uint64_t mTotalFrames;
   uint64_t mLastLogFrames;
 
   // mSkipProcessing is true if none of the processing passes are enabled,
   // because of prefs or constraints. This allows simply copying the audio into
   // the MSG, skipping resampling and the whole webrtc.org code.
-  bool mSkipProcessing;
+  Atomic<bool> mSkipProcessing;
 
   // To only update microphone when needed, we keep track of previous settings.
   MediaEnginePrefs mLastPrefs;
 
   AlignedFloatBuffer mInputBuffer;
   AlignedFloatBuffer mDeinterleavedBuffer;
   AlignedAudioBuffer mInputDownmixBuffer;
 };
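
The header change above is what lets processing be switched on and off at runtime: the audio callbacks read mSkipProcessing on the graph thread while another thread flips it when prefs or constraints change, so the flag has to be atomic. Below is a minimal standalone sketch of that access pattern, not Gecko code: the method names echo the real source, but their bodies, the SetPassThrough() setter and the use of std::atomic are assumptions made for illustration.

#include <atomic>
#include <cstddef>
#include <cstdio>

class MicrophoneSourceSketch {
public:
  // Audio callback thread: runs for every microphone buffer.
  void NotifyInputData(const float* aBuffer, size_t aFrames) {
    if (PassThrough()) {
      InsertInGraph(aBuffer, aFrames);        // copy straight into the graph
    } else {
      PacketizeAndProcess(aBuffer, aFrames);  // resample + webrtc.org passes
    }
  }

  // Any other thread: prefs or constraints changed.
  void SetPassThrough(bool aPassThrough) { mSkipProcessing = aPassThrough; }
  bool PassThrough() const { return mSkipProcessing; }

private:
  void InsertInGraph(const float*, size_t aFrames) {
    std::printf("pass-through: %zu frames\n", aFrames);
  }
  void PacketizeAndProcess(const float*, size_t aFrames) {
    std::printf("processed: %zu frames\n", aFrames);
  }

  // Atomic so a toggle from another thread is seen by the audio thread without
  // tearing or a data race; std::atomic stands in for the header's atomic flag.
  std::atomic<bool> mSkipProcessing{false};
};

int main() {
  MicrophoneSourceSketch source;
  float buffer[128] = {};
  source.NotifyInputData(buffer, 128);  // processing path
  source.SetPassThrough(true);          // e.g. every processing pass disabled
  source.NotifyInputData(buffer, 128);  // pass-through path
}
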
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -571,17 +571,17 @@ MediaEngineWebRTCMicrophoneSource::Start
     mPrincipalHandles.AppendElement(aPrincipalHandle);
     MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
   }
 
   AudioSegment* segment = new AudioSegment();
   if (mSampleFrequency == MediaEngine::USE_GRAPH_RATE) {
     mSampleFrequency = aStream->GraphRate();
   }
-  aStream->AddAudioTrack(aID, mSampleFrequency, 0, segment, SourceMediaStream::ADDTRACK_QUEUED);
+  aStream->AddAudioTrack(aID, aStream->GraphRate(), 0, segment, SourceMediaStream::ADDTRACK_QUEUED);
 
   // XXX Make this based on the pref.
   aStream->RegisterForAudioMixing();
   LOG(("Start audio for stream %p", aStream));
 
   if (!mListener) {
     mListener = new mozilla::WebRTCAudioDataListener(this);
   }
@@ -592,19 +592,17 @@ MediaEngineWebRTCMicrophoneSource::Start
     return NS_OK;
   }
   mState = kStarted;
   mTrackID = aID;
 
   // Make sure logger starts before capture
   AsyncLatencyLogger::Get(true);
 
-  if (mAudioOutputObserver) {
-    mAudioOutputObserver->Clear();
-  }
+  mAudioOutputObserver->Clear();
 
   mAudioInput->StartRecording(aStream, mListener);
 
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
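
Dropping the null check in this hunk is only safe because the header hunk turns mAudioOutputObserver into a const RefPtr, which must be set in the constructor's initializer list and can never be reseated afterwards. The constructor is not part of this diff, so the following standalone sketch of that invariant is an assumption, with std::shared_ptr standing in for RefPtr and all class names invented for illustration.

#include <cstdio>
#include <memory>

class ObserverSketch {
public:
  void Clear() { std::printf("far-end queue cleared\n"); }
};

class MicrophoneSourceSketch {
public:
  MicrophoneSourceSketch()
    : mAudioOutputObserver(std::make_shared<ObserverSketch>())  // never null
  {
  }

  void Start() {
    // No `if (mAudioOutputObserver)` needed: a const member set in the
    // initializer list cannot be null here and cannot be reseated later.
    mAudioOutputObserver->Clear();
  }

private:
  const std::shared_ptr<ObserverSketch> mAudioOutputObserver;
};

int main() {
  MicrophoneSourceSketch source;
  source.Start();
}
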
@@ -658,17 +656,17 @@ MediaEngineWebRTCMicrophoneSource::Notif
 
 void
 MediaEngineWebRTCMicrophoneSource::NotifyOutputData(MediaStreamGraph* aGraph,
                                                     AudioDataValue* aBuffer,
                                                     size_t aFrames,
                                                     TrackRate aRate,
                                                     uint32_t aChannels)
 {
-  if (mAudioOutputObserver) {
+  if (!PassThrough()) {
     mAudioOutputObserver->InsertFarEnd(aBuffer, aFrames, false,
                                   aRate, aChannels);
   }
 }
 
 // Only called if we're not in passthrough mode
 void
 MediaEngineWebRTCMicrophoneSource::PacketizeAndProcess(MediaStreamGraph* aGraph,
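
With the NotifyOutputData hunk above, far-end (speaker) audio is queued only while processing is enabled, because the consumer of that queue is the AEC feed inside PacketizeAndProcess (next hunk); queueing in pass-through mode would just let it grow. A standalone sketch of that producer/consumer gating follows; the class name and method bodies are illustrative, not the Gecko implementation.

#include <atomic>
#include <cstdio>
#include <mutex>
#include <queue>
#include <vector>

class FarEndQueueSketch {
public:
  void SetPassThrough(bool aPassThrough) { mSkipProcessing = aPassThrough; }

  // Speaker-output callback (producer): only queue when someone will consume.
  void NotifyOutputData(const float* aBuffer, size_t aFrames) {
    if (mSkipProcessing) {
      return;  // pass-through: the AEC never runs, so don't let the queue grow
    }
    std::lock_guard<std::mutex> lock(mMutex);
    mFarEnd.emplace(aBuffer, aBuffer + aFrames);
  }

  // Microphone processing path (consumer): drain the queue into the AEC.
  void PacketizeAndProcess() {
    std::lock_guard<std::mutex> lock(mMutex);
    while (!mFarEnd.empty()) {
      std::printf("feeding %zu far-end frames to the AEC\n",
                  mFarEnd.front().size());
      mFarEnd.pop();
    }
  }

private:
  std::atomic<bool> mSkipProcessing{false};
  std::mutex mMutex;
  std::queue<std::vector<float>> mFarEnd;
};

int main() {
  FarEndQueueSketch farEnd;
  float speakers[480] = {};                // 10 ms of audio at 48 kHz
  farEnd.NotifyOutputData(speakers, 480);
  farEnd.PacketizeAndProcess();            // drains the one queued chunk
  farEnd.SetPassThrough(true);
  farEnd.NotifyOutputData(speakers, 480);  // dropped, nothing will consume it
  farEnd.PacketizeAndProcess();            // queue stays empty
}
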
@@ -697,17 +695,17 @@ MediaEngineWebRTCMicrophoneSource::Packe
       free(mAudioOutputObserver->Pop()); // only call if size() > 0
     }
   }
 
   // Feed the far-end audio data (speakers) to the feedback input of the AEC.
   while (mAudioOutputObserver->Size() > 0) {
     // Bug 1414837: This will call `free()`, and we should remove it.
     // Pop gives ownership.
-    nsAutoPtr<FarEndAudioChunk> buffer(mAudioOutputObserver->Pop()); // only call if size() > 0
+    UniquePtr<FarEndAudioChunk> buffer(mAudioOutputObserver->Pop()); // only call if size() > 0
     if (!buffer) {
       continue;
     }
     AudioDataValue* packetDataPointer = buffer->mData;
     AutoTArray<AudioDataValue*, MAX_CHANNELS> deinterleavedPacketDataChannelPointers;
     AudioDataValue* interleavedFarend = nullptr;
     uint32_t channelCountFarend = 0;
     uint32_t framesPerPacketFarend = 0;
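
The Bug 1414837 comment in the last hunk notes that Pop() hands over a malloc-allocated, variable-length chunk whose release ultimately goes through free(). The standalone sketch below shows that ownership pattern with a free()-based deleter; the chunk layout and helper names are illustrative, not the actual FarEndAudioChunk definition.

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>

// Illustrative layout only; the real FarEndAudioChunk lives in Gecko and is
// not reproduced here.
struct FarEndChunkSketch {
  size_t mSamples;
  bool mOverrun;
  float mData[1];  // actually mSamples * channels entries, allocated inline
};

struct FreeDeleter {
  void operator()(void* aPtr) const { std::free(aPtr); }
};
using ChunkPtr = std::unique_ptr<FarEndChunkSketch, FreeDeleter>;

// One malloc covers the header and the inline sample storage, so the matching
// release has to be free(), not delete; the custom deleter encodes that.
ChunkPtr MakeChunk(size_t aSamples, size_t aChannels) {
  size_t bytes = sizeof(FarEndChunkSketch) +
                 (aSamples * aChannels - 1) * sizeof(float);
  auto* chunk = static_cast<FarEndChunkSketch*>(std::malloc(bytes));
  if (!chunk) {
    return nullptr;
  }
  std::memset(chunk, 0, bytes);
  chunk->mSamples = aSamples;
  return ChunkPtr(chunk);
}

int main() {
  ChunkPtr chunk = MakeChunk(441, 2);  // 10 ms of stereo audio at 44.1 kHz
  if (!chunk) {
    return 1;
  }
  std::printf("chunk holds %zu samples per channel\n", chunk->mSamples);
  // free() runs automatically when `chunk` goes out of scope.
}
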