Bug 1378070 - Apply review comments. r?padenot,jya (draft)
author Alex Chronopoulos <achronop@gmail.com>
Thu, 03 Aug 2017 18:32:34 +0300
changeset 620555 e60db4538cd751ca2faa0472ac0f2c182e3acd63
parent 620554 e67b1a5f04e6c49c29a4aaffcd57213e9271feba
child 640739 752bff3f0957f26a98a5674dc7236aa9932672b0
push id 72082
push user achronop@gmail.com
push date Thu, 03 Aug 2017 15:31:31 +0000
reviewers padenot, jya
bugs 1378070
milestone 56.0a1
Bug 1378070 - Apply review comments. r?padenot,jya
MozReview-Commit-ID: RI4Gn53ypm
dom/media/MediaStreamGraphImpl.h
dom/media/VideoUtils.cpp
dom/media/VideoUtils.h
dom/media/gtest/TestAudioBuffers.cpp
dom/media/webrtc/AudioOutputObserver.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -446,17 +446,16 @@ public:
   /**
    * Mark the media stream order as dirty.
    */
   void SetStreamOrderDirty()
   {
     mStreamOrderDirty = true;
   }
 
-  // Always stereo for now.
   uint32_t AudioChannelCount() const
   {
     return std::min<uint32_t>(8, CubebUtils::MaxNumberOfChannels());
   }
 
   double MediaTimeToSeconds(GraphTime aTime) const
   {
     NS_ASSERTION(aTime > -STREAM_TIME_MAX && aTime <= STREAM_TIME_MAX,
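
The stale "Always stereo for now" comment goes away because the count is no longer hard-coded: the graph asks cubeb for the device maximum and caps it at 8. A minimal sketch of the clamp semantics (GraphChannelCount is a hypothetical stand-in for the member above; the example values are made up):

#include <algorithm>
#include <cstdint>

// Hypothetical stand-in for AudioChannelCount(): the device maximum
// reported by CubebUtils::MaxNumberOfChannels(), clamped to 8.
uint32_t GraphChannelCount(uint32_t aDeviceMaxChannels)
{
  return std::min<uint32_t>(8, aDeviceMaxChannels);
}

// GraphChannelCount(2)  == 2 (stereo device)
// GraphChannelCount(6)  == 6 (5.1 device)
// GraphChannelCount(24) == 8 (clamped)
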
--- a/dom/media/VideoUtils.cpp
+++ b/dom/media/VideoUtils.cpp
@@ -138,39 +138,16 @@ media::TimeIntervals GetEstimatedBuffere
         media::TimeInterval(TimeUnit::FromMicroseconds(startUs),
                             TimeUnit::FromMicroseconds(endUs));
     }
     startOffset = aStream->GetNextCachedData(endOffset);
   }
   return buffered;
 }
 
-void DownmixToStereo(const mozilla::AudioDataValue* aBuffer,
-                     const uint32_t inChannels,
-                     const uint32_t aFrames,
-                     mozilla::AudioDataValue* aOutBuffer)
-{
-  MOZ_ASSERT(aBuffer);
-  const int channels = 2;
-  MOZ_ASSERT(inChannels > channels);
-  for (uint32_t fIdx = 0; fIdx < aFrames; ++fIdx) {
-#ifdef MOZ_SAMPLE_TYPE_FLOAT32
-    float sample = 0.0;
-#else
-    int sample = 0;
-#endif
-    for (uint32_t ch = channels; ch < inChannels; ++ch) {
-      // The sample of the buffer would be interleaved.
-      sample += aBuffer[fIdx*channels] / inChannels;
-    }
-    aOutBuffer[fIdx] = aBuffer[fIdx] + sample;
-    aOutBuffer[fIdx + 1] = aBuffer[fIdx + 1] + sample;
-  }
-}
-
 void DownmixStereoToMono(mozilla::AudioDataValue* aBuffer,
                          uint32_t aFrames)
 {
   MOZ_ASSERT(aBuffer);
   const int channels = 2;
   for (uint32_t fIdx = 0; fIdx < aFrames; ++fIdx) {
 #ifdef MOZ_SAMPLE_TYPE_FLOAT32
     float sample = 0.0;
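
Removing DownmixToStereo outright is the right call: besides being dead once the AudioConverter switch below lands, the loop indexed the interleaved input as aBuffer[fIdx*channels] instead of aBuffer[fIdx*inChannels + ch], and wrote the output as if it were mono-strided, so adjacent frames overlapped. For reference only, a corrected sketch of what the helper was trying to do (not part of the patch; callers now go through AudioConverter):

// Reference-only sketch, not part of the patch: mix every channel
// beyond the first two equally into both stereo outputs.
static void DownmixToStereoFixed(const mozilla::AudioDataValue* aBuffer,
                                 uint32_t aInChannels, uint32_t aFrames,
                                 mozilla::AudioDataValue* aOutBuffer)
{
  MOZ_ASSERT(aBuffer && aOutBuffer);
  MOZ_ASSERT(aInChannels > 2);
  for (uint32_t fIdx = 0; fIdx < aFrames; ++fIdx) {
    // Samples are interleaved: frame fIdx starts at fIdx * aInChannels.
    const mozilla::AudioDataValue* frame = &aBuffer[fIdx * aInChannels];
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
    float extra = 0.0;
#else
    int extra = 0;
#endif
    for (uint32_t ch = 2; ch < aInChannels; ++ch) {
      extra += frame[ch] / aInChannels;
    }
    aOutBuffer[fIdx * 2] = frame[0] + extra;
    aOutBuffer[fIdx * 2 + 1] = frame[1] + extra;
  }
}
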
--- a/dom/media/VideoUtils.h
+++ b/dom/media/VideoUtils.h
@@ -150,22 +150,16 @@ nsresult SecondsToUsecs(double aSeconds,
 // before being used!
 void ScaleDisplayByAspectRatio(nsIntSize& aDisplay, float aAspectRatio);
 
 // Downmix stereo audio samples to mono.
 // Inputs are the buffer containing the stereo data and the number of frames.
 void DownmixStereoToMono(mozilla::AudioDataValue* aBuffer,
                          uint32_t aFrames);
 
-void DownmixToStereo(const mozilla::AudioDataValue* aBuffer,
-                     const uint32_t inChannels,
-                     const uint32_t aFrames,
-                     mozilla::AudioDataValue* aOutBuffer);
-
-
 bool IsVideoContentType(const nsCString& aContentType);
 
 // Returns true if it's safe to use aPicture as the picture to be
 // extracted inside a frame of size aFrame, and scaled up to and displayed
 // at a size of aDisplay. You should validate the frame, picture, and
 // display regions before using them to display video frames.
 bool IsValidVideoRegion(const nsIntSize& aFrame, const nsIntRect& aPicture,
                         const nsIntSize& aDisplay);
--- a/dom/media/gtest/TestAudioBuffers.cpp
+++ b/dom/media/gtest/TestAudioBuffers.cpp
@@ -1,70 +1,59 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include <stdint.h>
 #include "AudioBufferUtils.h"
 #include "gtest/gtest.h"
+#include <vector>
 
 const uint32_t FRAMES = 256;
 
 void test_for_number_of_channels(const uint32_t channels)
 {
   const uint32_t samples = channels * FRAMES;
 
   mozilla::AudioCallbackBufferWrapper<float> mBuffer(channels);
   mozilla::SpillBuffer<float, 128> b(channels);
-#if defined(_WIN32) || defined(WIN32)
-  /* Variable length array is not allowed in windows compiler
-   * (C2131: expression did not evaluate to a constant) */
-  float * fromCallback = new float[samples];
-  float * other = new float[samples];
-#else
-  float fromCallback[samples];
-  float other[samples];
-#endif
-
-  for (uint32_t i = 0; i < samples; i++) {
-    other[i] = 1.0;
-    fromCallback[i] = 0.0;
-  }
+  std::vector<float> fromCallback(samples, 0.0);
+  std::vector<float> other(samples, 1.0);
 
   // Set the buffer in the wrapper from the callback
-  mBuffer.SetBuffer(fromCallback, FRAMES);
+  mBuffer.SetBuffer(fromCallback.data(), FRAMES);
 
   // Fill the SpillBuffer with data.
-  ASSERT_TRUE(b.Fill(other, 15) == 15);
-  ASSERT_TRUE(b.Fill(other, 17) == 17);
+  ASSERT_TRUE(b.Fill(other.data(), 15) == 15);
+  ASSERT_TRUE(b.Fill(other.data(), 17) == 17);
   for (uint32_t i = 0; i < 32 * channels; i++) {
     other[i] = 0.0;
   }
 
   // Empty it in the AudioCallbackBufferWrapper
   ASSERT_TRUE(b.Empty(mBuffer) == 32);
 
-  // Check available return something reasonnable
+  // Check Available() returns something reasonable
   ASSERT_TRUE(mBuffer.Available() == FRAMES - 32);
 
   // Fill the buffer with the rest of the data
-  mBuffer.WriteFrames(other + 32 * channels, FRAMES - 32);
+  mBuffer.WriteFrames(other.data() + 32 * channels, FRAMES - 32);
 
   // Check the buffer is now full
   ASSERT_TRUE(mBuffer.Available() == 0);
 
   for (uint32_t i = 0 ; i < samples; i++) {
     ASSERT_TRUE(fromCallback[i] == 1.0) <<
       "Difference at " << i << " (" << fromCallback[i] << " != " << 1.0 <<
       ")\n";
   }
 
-  ASSERT_TRUE(b.Fill(other, FRAMES) == 128);
-  ASSERT_TRUE(b.Fill(other, FRAMES) == 0);
+  ASSERT_TRUE(b.Fill(other.data(), FRAMES) == 128);
+  ASSERT_TRUE(b.Fill(other.data(), FRAMES) == 0);
   ASSERT_TRUE(b.Empty(mBuffer) == 0);
 }
 
 TEST(AudioBuffers, Test)
 {
   for (uint32_t ch = 1; ch <= 8; ++ch) {
     test_for_number_of_channels(ch);
   }
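
A side note on the test change: MSVC rejects variable-length arrays (the C2131 the removed comment cites), and std::vector's fill constructor also subsumes the manual init loop. A tiny self-contained illustration of the semantics the test now relies on:

#include <cassert>
#include <vector>

int main()
{
  const unsigned samples = 8;
  // The fill constructor copy-initializes every element, replacing the
  // explicit zero/one loops the VLA version needed.
  std::vector<float> fromCallback(samples, 0.0f);
  std::vector<float> other(samples, 1.0f);
  assert(fromCallback[samples - 1] == 0.0f);
  assert(other[0] == 1.0f);
  // .data() exposes the contiguous storage wherever a raw float* was
  // passed before (SetBuffer, Fill, WriteFrames).
  float* raw = other.data();
  assert(raw[0] == 1.0f);
  return 0;
}
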
--- a/dom/media/webrtc/AudioOutputObserver.h
+++ b/dom/media/webrtc/AudioOutputObserver.h
@@ -3,16 +3,17 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef AUDIOOUTPUTOBSERVER_H_
 #define AUDIOOUTPUTOBSERVER_H_
 
 #include "mozilla/StaticPtr.h"
 #include "nsAutoPtr.h"
 #include "AudioMixer.h"
+#include "MediaData.h"
 
 namespace webrtc {
 class SingleRwFifo;
 }
 
 namespace mozilla {
 
 typedef struct FarEndAudioChunk_ {
@@ -45,16 +46,16 @@ private:
   uint32_t mPlayoutChannels;
 
   nsAutoPtr<webrtc::SingleRwFifo> mPlayoutFifo;
   uint32_t mChunkSize;
 
   // chunking to 10ms support
   FarEndAudioChunk *mSaved; // can't be nsAutoPtr since we need to use free(), not delete
   uint32_t mSamplesSaved;
-  AudioDataValue mDownmixBuffer[480 * 2];// big enough to hold 10ms in max rate [MAX_SAMPLING_FREQ * MAX_CHANNELS / 100]
+  AlignedAudioBuffer mDownmixBuffer;
 };
 
 extern StaticRefPtr<AudioOutputObserver> gFarendObserver;
 
 }
 
 #endif
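
The fixed stack array becomes a heap-allocated AlignedAudioBuffer, sized once in the constructor (see the MediaEngineWebRTCAudio.cpp hunk below). The capacity works out the same, assuming the constants the old comment implies: with MAX_SAMPLING_FREQ = 48000 and MAX_CHANNELS = 2, MAX_SAMPLING_FREQ * MAX_CHANNELS / 100 = 48000 * 2 / 100 = 960 samples, i.e. exactly the 480 * 2 the removed comment described as 10ms at the maximum rate.
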
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -4,16 +4,17 @@
 
 #include "MediaEngineWebRTC.h"
 #include <stdio.h>
 #include <algorithm>
 #include "mozilla/Assertions.h"
 #include "MediaTrackConstraints.h"
 #include "mtransport/runnable_utils.h"
 #include "nsAutoPtr.h"
+#include "AudioConverter.h"
 
 // scoped_ptr.h uses FF
 #ifdef FF
 #undef FF
 #endif
 #include "webrtc/modules/audio_device/opensl/single_rw_fifo.h"
 
 #define CHANNELS 1
@@ -58,16 +59,17 @@ ScopedCustomReleasePtr<webrtc::VoENetwor
 ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> MediaEngineWebRTCMicrophoneSource::mVoEProcessing;
 
 AudioOutputObserver::AudioOutputObserver()
   : mPlayoutFreq(0)
   , mPlayoutChannels(0)
   , mChunkSize(0)
   , mSaved(nullptr)
   , mSamplesSaved(0)
+  , mDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100)
 {
   // Buffers of 10ms chunks
   mPlayoutFifo = new webrtc::SingleRwFifo(MAX_AEC_FIFO_DEPTH/10);
 }
 
 AudioOutputObserver::~AudioOutputObserver()
 {
   Clear();
@@ -98,20 +100,18 @@ AudioOutputObserver::Size()
 
 // static
 void
 AudioOutputObserver::InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aFrames, bool aOverran,
                                   int aFreq, int aChannels)
 {
   // Prepare for downmix if needed
   int channels = aChannels;
-  AudioDataValue * buffer = const_cast<AudioDataValue*>(aBuffer);
   if (aChannels > MAX_CHANNELS) {
     channels = MAX_CHANNELS;
-    buffer = mDownmixBuffer;
   }
 
   if (mPlayoutChannels != 0) {
     if (mPlayoutChannels != static_cast<uint32_t>(channels)) {
       MOZ_CRASH();
     }
   } else {
     MOZ_ASSERT(channels <= MAX_CHANNELS);
@@ -148,21 +148,24 @@ AudioOutputObserver::InsertFarEnd(const 
       mSaved->mOverrun = aOverran;
       aOverran = false;
     }
     uint32_t to_copy = mChunkSize - mSamplesSaved;
     if (to_copy > aFrames) {
       to_copy = aFrames;
     }
 
+    int16_t* dest = &(mSaved->mData[mSamplesSaved * channels]);
     if (aChannels > MAX_CHANNELS) {
-      DownmixToStereo(aBuffer, aChannels, to_copy, buffer);
+      AudioConverter converter(AudioConfig(aChannels, 0), AudioConfig(channels, 0));
+      converter.Process(mDownmixBuffer, aBuffer, to_copy);
+      ConvertAudioSamples(mDownmixBuffer.Data(), dest, to_copy * channels);
+    } else {
+      ConvertAudioSamples(aBuffer, dest, to_copy * channels);
     }
-    int16_t *dest = &(mSaved->mData[mSamplesSaved * channels]);
-    ConvertAudioSamples(buffer, dest, to_copy * channels);
 
 #ifdef LOG_FAREND_INSERTION
     if (fp) {
       fwrite(&(mSaved->mData[mSamplesSaved * aChannels]), to_copy * aChannels, sizeof(int16_t), fp);
     }
 #endif
     aFrames -= to_copy;
     mSamplesSaved += to_copy;
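
The net effect of the InsertFarEnd change: the buggy hand-rolled downmix is gone, the const_cast on aBuffer is gone, and channel reduction runs through AudioConverter into the preallocated mDownmixBuffer before the int16 conversion for the FIFO chunk. A minimal standalone sketch of that path, assuming the AudioConverter, AudioConfig, and ConvertAudioSamples signatures exactly as the patch uses them (the rate fields are 0 because only the channel layout changes, no resampling):

// Sketch only: condensed from the patched InsertFarEnd() above.
void DownmixAndConvert(const mozilla::AudioDataValue* aInput,
                       uint32_t aInChannels, uint32_t aOutChannels,
                       uint32_t aFrames, int16_t* aDest,
                       mozilla::AlignedAudioBuffer& aScratch)
{
  mozilla::AudioConverter converter(mozilla::AudioConfig(aInChannels, 0),
                                    mozilla::AudioConfig(aOutChannels, 0));
  // Downmix aFrames interleaved frames into the preallocated scratch
  // buffer, then convert the samples to int16 for the saved chunk.
  converter.Process(aScratch, aInput, aFrames);
  mozilla::ConvertAudioSamples(aScratch.Data(), aDest,
                               aFrames * aOutChannels);
}
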