Bug 1428392 - Rename the MediaEngineWebRTCMicrophoneSource packetizer to indicate it's packetizing the input data (microphone).
author     Paul Adenot <paul@paul.cx>
date       Fri, 05 Jan 2018 17:19:22 +0100
changeset  716396  41096d7a9cb4da940945d2e82bfe4ec667ab7bf3
parent     716325  81362f7306fe413b19fdba27cd0e9a5525d902e1
child      716397  b9cb2d32cf2a5fdca584044202c1598b3abbe0a0
push id    94426
push user  paul@paul.cx
push date  Fri, 05 Jan 2018 17:25:21 +0000
bugs       1428392
milestone  59.0a1

Bug 1428392 - Rename the MediaEngineWebRTCMicrophoneSource packetizer to indicate it's packetizing the input data (microphone). MozReview-Commit-ID: AcjAeXdN8iA
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
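
The change itself is a mechanical rename (mPacketizer -> mPacketizerInput): the member regroups whatever buffer sizes the audio callback delivers into fixed 10 ms packets of aRate / 100 frames each, and the new name records that it packetizes the input (microphone) side rather than the far-end/render side. Below is a minimal standalone sketch of that regrouping pattern; SketchPacketizer is hypothetical, modeled on the Input / PacketsAvailable / Output / PacketSize / Channels calls visible in the diff, and is not Gecko's AudioPacketizer.

// Hypothetical sketch of the regrouping the packetizer performs, for
// illustration only. At 44100 Hz a 10 ms packet is 44100 / 100 = 441 frames,
// regardless of how many frames each platform callback delivers.
#include <cstdint>
#include <cstring>
#include <vector>

class SketchPacketizer {
public:
  SketchPacketizer(uint32_t aPacketSize, uint32_t aChannels)
    : mPacketSize(aPacketSize), mChannels(aChannels) {}

  uint32_t PacketSize() const { return mPacketSize; }
  uint32_t Channels() const { return mChannels; }

  // Buffer an arbitrary number of interleaved frames.
  void Input(const float* aBuffer, uint32_t aFrames) {
    mBuffered.insert(mBuffered.end(), aBuffer, aBuffer + aFrames * mChannels);
  }

  bool PacketsAvailable() const {
    return mBuffered.size() >= mPacketSize * mChannels;
  }

  // Pop exactly PacketSize() * Channels() samples into aOutput.
  void Output(float* aOutput) {
    size_t samples = mPacketSize * mChannels;
    std::memcpy(aOutput, mBuffered.data(), samples * sizeof(float));
    mBuffered.erase(mBuffered.begin(), mBuffered.begin() + samples);
  }

private:
  const uint32_t mPacketSize;
  const uint32_t mChannels;
  std::vector<float> mBuffered; // interleaved samples awaiting a full packet
};

The calling loop then has exactly the shape seen in the .cpp hunk: Input(aBuffer, aFrames) once per callback, followed by while (PacketsAvailable()) { Output(packet); /* process one 10 ms packet */ }.
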
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -520,17 +520,17 @@ private:
 
   // Note: shared across all microphone sources
   static int sChannelsOpen;
 
   const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
   const RefPtr<AudioOutputObserver> mAudioOutputObserver;
 
   // accessed from the GraphDriver thread except for deletion
-  nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizer;
+  nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
   ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERenderListener;
 
   // mMonitor protects mSources[] and mPrincipalHandles[] access/changes, and
   // transitions of mState from kStarted to kStopped (which are combined with
   // EndTrack()). mSources[] and mPrincipalHandles[] are accessed from webrtc
   // threads.
   Monitor mMonitor;
   nsTArray<RefPtr<SourceMediaStream>> mSources;
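
The mMonitor comment above is the contract the second file's hunk relies on: the packetizing code runs off the main thread, so it takes mMonitor before checking mState or appending to mSources[], which is the MonitorAutoLock visible near the end of the diff below. A schematic of that discipline follows; std::mutex stands in for mozilla::Monitor and an int stands in for a processed packet, so this is a sketch of the pattern, not the real class.

// Schematic of the locking discipline described in the header comment above;
// std::mutex plays the role of mozilla::Monitor, not Gecko's actual types.
#include <mutex>
#include <vector>

enum State { kStarted, kStopped };

struct MicrophoneSourceSketch {
  std::mutex mMonitor;        // plays the role of mMonitor
  State mState = kStarted;    // kStarted -> kStopped guarded by the monitor
  std::vector<int> mSources;  // stands in for mSources[]

  // Audio-thread side: lock, re-check state, then touch the shared arrays,
  // mirroring the MonitorAutoLock taken before AppendToTrack in the diff.
  void AppendProcessedPacket(int aPacket) {
    std::lock_guard<std::mutex> lock(mMonitor);
    if (mState != kStarted) {
      return; // stopped concurrently; drop the packet
    }
    mSources.push_back(aPacket);
  }
};

The re-check of mState under the lock matters because a stop can land between packets; the diff's `if (mState != kStarted)` is exactly that guard.
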
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -709,21 +709,21 @@ MediaEngineWebRTCMicrophoneSource::Packe
                                                        const AudioDataValue* aBuffer,
                                                        size_t aFrames,
                                                        TrackRate aRate,
                                                        uint32_t aChannels)
 {
   MOZ_ASSERT(!PassThrough(), "This should be bypassed when in PassThrough mode.");
   size_t offset = 0;
 
-  if (!mPacketizer ||
-      mPacketizer->PacketSize() != aRate/100u ||
-      mPacketizer->Channels() != aChannels) {
+  if (!mPacketizerInput ||
+      mPacketizerInput->PacketSize() != aRate/100u ||
+      mPacketizerInput->Channels() != aChannels) {
     // It's ok to drop the audio still in the packetizer here.
-    mPacketizer =
+    mPacketizerInput =
       new AudioPacketizer<AudioDataValue, float>(aRate/100, aChannels);
   }
 
   // On initial capture, throw away all far-end data except the most recent sample
   // since it's already irrelevant and we want to avoid confusing the AEC far-end
   // input code with "old" audio.
   if (!mStarted) {
     mStarted = true;
@@ -809,68 +809,68 @@ MediaEngineWebRTCMicrophoneSource::Packe
       MOZ_LOG(GetMediaManagerLog(), LogLevel::Error,
           ("error in audio ProcessReverseStream(): %d", err));
       return;
     }
   }
 
   // Packetize our input data into 10ms chunks, deinterleave into planar channel
   // buffers, process, and append to the right MediaStreamTrack.
-  mPacketizer->Input(aBuffer, static_cast<uint32_t>(aFrames));
+  mPacketizerInput->Input(aBuffer, static_cast<uint32_t>(aFrames));
 
-  while (mPacketizer->PacketsAvailable()) {
-    uint32_t samplesPerPacket = mPacketizer->PacketSize() *
-      mPacketizer->Channels();
+  while (mPacketizerInput->PacketsAvailable()) {
+    uint32_t samplesPerPacket = mPacketizerInput->PacketSize() *
+      mPacketizerInput->Channels();
     if (mInputBuffer.Length() < samplesPerPacket) {
       mInputBuffer.SetLength(samplesPerPacket);
     }
     if (mDeinterleavedBuffer.Length() < samplesPerPacket) {
       mDeinterleavedBuffer.SetLength(samplesPerPacket);
     }
     float* packet = mInputBuffer.Data();
-    mPacketizer->Output(packet);
+    mPacketizerInput->Output(packet);
 
     // Deinterleave the input data
     // Prepare an array pointing to deinterleaved channels.
     AutoTArray<float*, 8> deinterleavedPacketizedInputDataChannelPointers;
     deinterleavedPacketizedInputDataChannelPointers.SetLength(aChannels);
     offset = 0;
     for (size_t i = 0; i < deinterleavedPacketizedInputDataChannelPointers.Length(); ++i) {
       deinterleavedPacketizedInputDataChannelPointers[i] = mDeinterleavedBuffer.Data() + offset;
-      offset += mPacketizer->PacketSize();
+      offset += mPacketizerInput->PacketSize();
     }
 
     // Deinterleave the packet into mDeinterleavedBuffer, via the channel
     // pointers prepared above.
-    Deinterleave(packet, mPacketizer->PacketSize(), aChannels,
+    Deinterleave(packet, mPacketizerInput->PacketSize(), aChannels,
         deinterleavedPacketizedInputDataChannelPointers.Elements());
 
     StreamConfig inputConfig(aRate,
                              aChannels,
                             false /* we don't use typing detection */);
     StreamConfig outputConfig = inputConfig;
 
     // Bug 1404965: Get the right delay here, it saves some work down the line.
     mAudioProcessing->set_stream_delay_ms(0);
 
     // Bug 1414837: find a way to not allocate here.
     RefPtr<SharedBuffer> buffer =
-      SharedBuffer::Create(mPacketizer->PacketSize() * aChannels * sizeof(float));
+      SharedBuffer::Create(mPacketizerInput->PacketSize() * aChannels * sizeof(float));
     AudioSegment segment;
 
     // Prepare channel pointers to the SharedBuffer created above.
     AutoTArray<float*, 8> processedOutputChannelPointers;
     AutoTArray<const float*, 8> processedOutputChannelPointersConst;
     processedOutputChannelPointers.SetLength(aChannels);
     processedOutputChannelPointersConst.SetLength(aChannels);
 
     offset = 0;
     for (size_t i = 0; i < processedOutputChannelPointers.Length(); ++i) {
       processedOutputChannelPointers[i] = static_cast<float*>(buffer->Data()) + offset;
       processedOutputChannelPointersConst[i] = static_cast<float*>(buffer->Data()) + offset;
-      offset += mPacketizer->PacketSize();
+      offset += mPacketizerInput->PacketSize();
     }
 
     mAudioProcessing->ProcessStream(deinterleavedPacketizedInputDataChannelPointers.Elements(),
                                     inputConfig,
                                     outputConfig,
                                     processedOutputChannelPointers.Elements());
     MonitorAutoLock lock(mMonitor);
     if (mState != kStarted)
@@ -882,17 +882,17 @@ MediaEngineWebRTCMicrophoneSource::Packe
       }
 
       // We already have planar audio data of the right format. Insert into the
       // MSG.
       MOZ_ASSERT(processedOutputChannelPointers.Length() == aChannels);
       RefPtr<SharedBuffer> other = buffer;
       segment.AppendFrames(other.forget(),
                            processedOutputChannelPointersConst,
-                           mPacketizer->PacketSize(),
+                           mPacketizerInput->PacketSize(),
                            mPrincipalHandles[i]);
       mSources[i]->AppendToTrack(mTrackID, &segment);
     }
   }
 }
 
 template<typename T>
 void
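
One detail worth spelling out: the Deinterleave call in the hunk above converts one interleaved packet (c0 c1 c0 c1 ...) into the planar per-channel buffers that AudioProcessing::ProcessStream consumes. A standalone sketch of that conversion follows; Gecko's real helper is a template over the sample type, whereas this hypothetical DeinterleaveSketch handles float only.

// Hypothetical sketch of the interleaved -> planar conversion done by the
// Deinterleave call above; not Gecko's actual implementation.
#include <cstddef>

// aInterleaved holds aFrames * aChannels samples laid out frame by frame
// (c0 c1 ... c0 c1 ...); aPlanar[ch] must point at room for aFrames samples.
void DeinterleaveSketch(const float* aInterleaved,
                        size_t aFrames,
                        size_t aChannels,
                        float* const* aPlanar)
{
  for (size_t frame = 0; frame < aFrames; ++frame) {
    for (size_t ch = 0; ch < aChannels; ++ch) {
      aPlanar[ch][frame] = aInterleaved[frame * aChannels + ch];
    }
  }
}

In the diff, each channel pointer is offset by mPacketizerInput->PacketSize() into mDeinterleavedBuffer, which is why the pointer-preparation loop advances offset by PacketSize() per channel.
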