Bug 971528 - Expect stereo input in MediaEngineWebRTCMicrophoneSource. r?padenot
author Alex Chronopoulos <achronop@gmail.com>
Fri, 02 Jun 2017 09:12:08 +0300
changeset 588111 cafb069d6d2cf15f4d76cc8fd8592ededeb28533
parent 588110 a8fb5a1d1243a8920a80350ca176e3c2d7af350e
child 588112 dee434b4dc46b13bd78191dcceab1e15a848468e
push id 61914
push user achronop@gmail.com
push date Fri, 02 Jun 2017 06:39:03 +0000
reviewers padenot
bugs 971528
milestone 55.0a1
Bug 971528 - Expect stereo input in MediaEngineWebRTCMicrophoneSource. r?padenot

MozReview-Commit-ID: H8g9JW1xgP9
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -595,34 +595,55 @@ MediaEngineWebRTCMicrophoneSource::Inser
     }
   }
 
   size_t len = mSources.Length();
   for (size_t i = 0; i < len; i++) {
     if (!mSources[i]) {
       continue;
     }
-    RefPtr<SharedBuffer> buffer =
-      SharedBuffer::Create(aFrames * aChannels * sizeof(T));
-    PodCopy(static_cast<T*>(buffer->Data()),
-            aBuffer, aFrames * aChannels);
 
     TimeStamp insertTime;
     // Make sure we include the stream and the track.
     // The 0:1 is a flag to note when we've done the final insert for a given input block.
     LogTime(AsyncLatencyLogger::AudioTrackInsertion,
             LATENCY_STREAM_ID(mSources[i].get(), mTrackID),
             (i+1 < len) ? 0 : 1, insertTime);
 
+    // Bug 971528 - Support stereo capture in gUM
+    MOZ_ASSERT(aChannels == 1 || aChannels == 2,
+        "GraphDriver only supports mono and stereo audio for now");
+
     nsAutoPtr<AudioSegment> segment(new AudioSegment());
-    AutoTArray<const T*, 1> channels;
-    // XXX Bug 971528 - Support stereo capture in gUM
-    MOZ_ASSERT(aChannels == 1,
-        "GraphDriver only supports us stereo audio for now");
-    channels.AppendElement(static_cast<T*>(buffer->Data()));
+    RefPtr<SharedBuffer> buffer =
+      SharedBuffer::Create(aFrames * aChannels * sizeof(T));
+    AutoTArray<const T*, 8> channels;
+    if (aChannels == 1) {
+      // Mono input: copy the samples straight into the shared buffer.
+      PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
+      channels.AppendElement(static_cast<T*>(buffer->Data()));
+    } else {
+      // Stereo input: lay the channels out planar (back to back) in the
+      // shared buffer and deinterleave the interleaved input into them.
+      channels.SetLength(aChannels);
+      AutoTArray<T*, 8> write_channels;
+      write_channels.SetLength(aChannels);
+      T* samples = static_cast<T*>(buffer->Data());
+
+      size_t offset = 0;
+      for (uint32_t i = 0; i < aChannels; ++i) {
+        channels[i] = write_channels[i] = samples + offset;
+        offset += aFrames;
+      }
+
+      DeinterleaveAndConvertBuffer(aBuffer,
+                                   aFrames,
+                                   aChannels,
+                                   write_channels.Elements());
+    }
+
     segment->AppendFrames(buffer.forget(), channels, aFrames,
                          mPrincipalHandles[i]);
     segment->GetStartTime(insertTime);
 
     mSources[i]->AppendToTrack(mTrackID, segment);
   }
 }
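
For reference, a minimal standalone sketch of the deinterleaving step that the
patch delegates to DeinterleaveAndConvertBuffer (Gecko's helper, which also
converts sample formats). The function name Deinterleave, the float buffers,
and the sample values below are illustrative assumptions, not Gecko code:

#include <cstddef>
#include <cstdint>
#include <vector>

// Split an interleaved buffer (L R L R ...) into planar channels
// (L L ... and R R ...). Sample `frame` of channel `c` lives at
// interleaved index frame * aChannels + c.
template <typename T>
void Deinterleave(const T* aInterleaved, size_t aFrames, uint32_t aChannels,
                  T** aPlanar)
{
  for (size_t frame = 0; frame < aFrames; ++frame) {
    for (uint32_t c = 0; c < aChannels; ++c) {
      aPlanar[c][frame] = aInterleaved[frame * aChannels + c];
    }
  }
}

int main()
{
  // Two frames of stereo input, interleaved: L0 R0 L1 R1.
  const float interleaved[] = { 0.1f, 0.2f, 0.3f, 0.4f };
  std::vector<float> left(2), right(2);
  float* planar[] = { left.data(), right.data() };
  Deinterleave(interleaved, 2, 2u, planar);
  // Now left == { 0.1f, 0.3f } and right == { 0.2f, 0.4f }.
  return 0;
}

The patch applies the same idea with a single allocation: one SharedBuffer of
aFrames * aChannels samples, with channel i starting at samples + i * aFrames,
so AppendFrames can take ownership of one planar buffer instead of one
allocation per channel.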