Bug 1208371 - Don't treat audio chunks as mutable in MediaPipeline. r?padenot
author      Andreas Pehrson <pehrsons@gmail.com>
date        Thu, 10 Mar 2016 15:36:10 +0100
changeset   347671  d93c273b2d1e476027b8435127e199bdaa59ecef
parent      347670  4388571fa228001ecd479975eb2ed245bdd94b9f
child       347672  f7045b1a9acd0ac8ca1b63ff7936c58ec6a6db90
push id     14642
push user   pehrsons@gmail.com
push date   Tue, 05 Apr 2016 16:45:34 +0000
reviewers   padenot
bugs        1208371
milestone   47.0a1
Bug 1208371 - Don't treat audio chunks as mutable in MediaPipeline. r?padenot MozReview-Commit-ID: 26VPBK2WOsB
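
For reviewers skimming the diff below: the listener previously wrote AUDIO_FORMAT_SILENCE into the chunk it received when the track was disabled, i.e. it mutated a chunk it does not own. The patch folds the enabled_ check into the conversion branches instead, so silence is produced in the locally owned output buffer and the chunk stays read-only. A minimal sketch of that pattern, using simplified stand-in types (Chunk, SampleFormat, FeedAudio are hypothetical, not the real AudioChunk/MediaPipeline classes):

    // Simplified illustration of the pattern in this patch: the incoming
    // chunk is treated as read-only, and silence is written into a locally
    // owned buffer instead of flipping the chunk's format field.
    #include <cstdint>
    #include <cstring>
    #include <memory>
    #include <vector>

    enum class SampleFormat { S16, Float32, Silence };

    struct Chunk {                        // stand-in for mozilla::AudioChunk
      SampleFormat format;
      size_t duration;                    // frames
      std::vector<const int16_t*> channels;
    };

    void FeedAudio(const Chunk& chunk, bool enabled, uint32_t outputChannels) {
      const int16_t* samples = nullptr;
      std::unique_ptr<int16_t[]> converted;

      if (enabled && outputChannels == 1 && chunk.format == SampleFormat::S16) {
        // Common case: already 16-bit mono, so planar == interleaved and the
        // chunk's buffer can be used directly.
        samples = chunk.channels[0];
      } else {
        converted = std::make_unique<int16_t[]>(chunk.duration * outputChannels);
        if (!enabled || chunk.format == SampleFormat::Silence) {
          // Disabled track or silent chunk: zero the *local* buffer; the
          // chunk itself is never modified.
          std::memset(converted.get(), 0,
                      chunk.duration * outputChannels * sizeof(int16_t));
        } else {
          // The real code downmixes/interleaves here (DownmixAndInterleave).
        }
        samples = converted.get();
      }
      (void)samples;  // handed off to the sending path in the real code
    }
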
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -1180,44 +1180,35 @@ void MediaPipelineTransmit::PipelineList
 
   // Convert to interleaved, 16-bits integer audio, with a maximum of two
   // channels (since the WebRTC.org code below makes the assumption that the
   // input audio is either mono or stereo).
   uint32_t outputChannels = chunk.ChannelCount() == 1 ? 1 : 2;
   const int16_t* samples = nullptr;
   UniquePtr<int16_t[]> convertedSamples;
 
-  // If this track is not enabled, simply ignore the data in the chunk.
-  if (!enabled_) {
-    chunk.mBufferFormat = AUDIO_FORMAT_SILENCE;
-  }
-
   // We take advantage of the fact that the common case (microphone directly to
   // PeerConnection, that is, a normal call), the samples are already 16-bits
   // mono, so the representation in interleaved and planar is the same, and we
   // can just use that.
-  if (outputChannels == 1 && chunk.mBufferFormat == AUDIO_FORMAT_S16) {
+  if (enabled_ && outputChannels == 1 && chunk.mBufferFormat == AUDIO_FORMAT_S16) {
     samples = chunk.ChannelData<int16_t>().Elements()[0];
   } else {
     convertedSamples = MakeUnique<int16_t[]>(chunk.mDuration * outputChannels);
 
-    switch (chunk.mBufferFormat) {
-        case AUDIO_FORMAT_FLOAT32:
-          DownmixAndInterleave(chunk.ChannelData<float>(),
-                               chunk.mDuration, chunk.mVolume, outputChannels,
-                               convertedSamples.get());
-          break;
-        case AUDIO_FORMAT_S16:
-          DownmixAndInterleave(chunk.ChannelData<int16_t>(),
-                               chunk.mDuration, chunk.mVolume, outputChannels,
-                               convertedSamples.get());
-          break;
-        case AUDIO_FORMAT_SILENCE:
-          PodZero(convertedSamples.get(), chunk.mDuration * outputChannels);
-          break;
+    if (!enabled_ || chunk.mBufferFormat == AUDIO_FORMAT_SILENCE) {
+      PodZero(convertedSamples.get(), chunk.mDuration * outputChannels);
+    } else if (chunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
+      DownmixAndInterleave(chunk.ChannelData<float>(),
+                           chunk.mDuration, chunk.mVolume, outputChannels,
+                           convertedSamples.get());
+    } else if (chunk.mBufferFormat == AUDIO_FORMAT_S16) {
+      DownmixAndInterleave(chunk.ChannelData<int16_t>(),
+                           chunk.mDuration, chunk.mVolume, outputChannels,
+                           convertedSamples.get());
     }
     samples = convertedSamples.get();
   }
 
   MOZ_ASSERT(!(rate%100)); // rate should be a multiple of 100
 
   // Check if the rate or the number of channels has changed since the last time
   // we came through. I realize it may be overkill to check if the rate has
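
The fast path kept in the hunk relies on the observation in the comment above: for mono 16-bit audio the planar and interleaved representations coincide, so no conversion or allocation is needed. A small, self-contained sketch (illustrative only, not the real DownmixAndInterleave, which also applies volume and downmixing) of what interleaving means in the stereo case:

    #include <cstdint>
    #include <cstddef>

    // Weave two planar channels into one interleaved buffer: L0 R0 L1 R1 ...
    void InterleaveStereo(const int16_t* left, const int16_t* right,
                          size_t frames, int16_t* out) {
      for (size_t i = 0; i < frames; ++i) {
        out[2 * i]     = left[i];
        out[2 * i + 1] = right[i];
      }
    }
    // With a single channel the "interleaved" output would be an exact copy
    // of the planar input, which is why the mono/S16 case skips this step.
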