Bug 1408294 - Append null audio data when audio input underruns. r?padenot draft
author Andreas Pehrson <pehrsons@mozilla.com>
Wed, 22 Nov 2017 14:30:00 +0100
changeset 705085 aa0d667d559b9eb64b4c1b8952ef19bf808a1aa0
parent 705084 1571dfc0fe48efff799fa0f91eca0ddf359ccc06
child 705086 e444743db9705df4bbd9fe22740da032b481cc34
push id 91355
push user bmo:apehrson@mozilla.com
push date Wed, 29 Nov 2017 13:15:51 +0000
reviewers padenot
bugs 1408294
milestone 59.0a1
Bug 1408294 - Append null audio data when audio input underruns. r?padenot

MozReview-Commit-ID: 3bNTZRhv839
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/moz.build
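
For context, the core of this change is in MediaEngineWebRTCMicrophoneSource::NotifyPull below: when the graph's desired pull time runs ahead of the data the microphone has appended, the gap is padded with null (silent) audio so the track keeps advancing. The following is a minimal illustrative sketch of that pattern only, not part of the patch; it assumes the SourceMediaStream/AudioSegment calls that appear in the diff (GetEndOfAppendedData, AppendNullData, AppendToTrack).

// Illustrative sketch (not part of the patch): fill the gap between the
// desired pull time and the end of the appended data with silent frames,
// using the same SourceMediaStream/AudioSegment calls as the diff below.
static void
FillPullGapWithSilence(SourceMediaStream* aSource,
                       TrackID aID,
                       StreamTime aDesiredTime)
{
  // How far the graph's desired time is ahead of the real audio we appended.
  StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
  if (delta <= 0) {
    return; // Appended data already covers the desired time.
  }
  // Append null (silent) data so the track never underruns, whether the
  // input device fell behind or the source has been stopped.
  AudioSegment silence;
  silence.AppendNullData(delta);
  aSource->AppendToTrack(aID, &silence);
}
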
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -631,16 +631,21 @@ private:
   nsString mDeviceName;
   nsCString mDeviceUUID;
 
   int32_t mSampleFrequency;
   uint64_t mTotalFrames;
   uint64_t mLastLogFrames;
   int32_t mPlayoutDelay;
 
+#ifdef DEBUG
+  // The graph driver's GetCurrentTimeStamp() the last time we appended data.
+  TimeStamp mLastAppendTime;
+#endif
+
   NullTransport *mNullTransport;
 
   nsTArray<int16_t> mInputBuffer;
   // mSkipProcessing is true if none of the processing passes are enabled,
   // because of prefs or constraints. This allows simply copying the audio into
   // the MSG, skipping resampling and the whole webrtc.org code.
   bool mSkipProcessing;
 
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -1,20 +1,23 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaEngineWebRTC.h"
+
 #include <stdio.h>
 #include <algorithm>
+
+#include "AudioConverter.h"
+#include "MediaStreamGraphImpl.h"
+#include "MediaTrackConstraints.h"
 #include "mozilla/Assertions.h"
-#include "MediaTrackConstraints.h"
 #include "mtransport/runnable_utils.h"
 #include "nsAutoPtr.h"
-#include "AudioConverter.h"
 
 // scoped_ptr.h uses FF
 #ifdef FF
 #undef FF
 #endif
 #include "webrtc/modules/audio_device/opensl/single_rw_fifo.h"
 
 #define CHANNELS 1
@@ -203,16 +206,19 @@ MediaEngineWebRTCMicrophoneSource::Media
   , mDelayAgnostic(aDelayAgnostic)
   , mExtendedFilter(aExtendedFilter)
   , mTrackID(TRACK_NONE)
   , mStarted(false)
   , mSampleFrequency(MediaEngine::DEFAULT_SAMPLE_RATE)
   , mTotalFrames(0)
   , mLastLogFrames(0)
   , mPlayoutDelay(0)
+#ifdef DEBUG
+  , mLastAppendTime(TimeStamp::Now())
+#endif
   , mNullTransport(nullptr)
   , mSkipProcessing(false)
   , mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100)
 {
   MOZ_ASSERT(aVoiceEnginePtr);
   MOZ_ASSERT(aAudioInput);
   mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
   mDeviceUUID.Assign(uuid);
@@ -590,18 +596,35 @@ MediaEngineWebRTCMicrophoneSource::Stop(
 
 void
 MediaEngineWebRTCMicrophoneSource::NotifyPull(MediaStreamGraph *aGraph,
                                               SourceMediaStream *aSource,
                                               TrackID aID,
                                               StreamTime aDesiredTime,
                                               const PrincipalHandle& aPrincipalHandle)
 {
-  // Ignore - we push audio data
   LOG_FRAMES(("NotifyPull, desired = %" PRId64, (int64_t) aDesiredTime));
+
+  StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
+  if (delta > 0) {
+    // Not enough data has been pushed, so we fill the gap with silence.
+    // This could be due to underruns or because we have been stopped.
+
+#ifdef DEBUG
+    MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(aGraph);
+    TimeStamp currentTime = graph->CurrentDriver()->GetCurrentTimeStamp();
+    MOZ_ASSERT(currentTime > mLastAppendTime,
+               "CurrentTime didn't advance since last append");
+    mLastAppendTime = currentTime;
+#endif
+
+    AudioSegment audio;
+    audio.AppendNullData(delta);
+    aSource->AppendToTrack(aID, &audio);
+  }
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::NotifyOutputData(MediaStreamGraph* aGraph,
                                                     AudioDataValue* aBuffer,
                                                     size_t aFrames,
                                                     TrackRate aRate,
                                                     uint32_t aChannels)
@@ -668,16 +691,26 @@ MediaEngineWebRTCMicrophoneSource::Inser
     if (mTotalFrames > mLastLogFrames + mSampleFrequency) { // ~ 1 second
       MOZ_LOG(AudioLogModule(), LogLevel::Debug,
               ("%p: Inserting %zu samples into graph, total frames = %" PRIu64,
                (void*)this, aFrames, mTotalFrames));
       mLastLogFrames = mTotalFrames;
     }
   }
 
+#ifdef DEBUG
+  if (mSources.Length() >= 1) {
+    MediaStreamGraphImpl* graph = mSources[0]->GraphImpl();
+    TimeStamp currentTime = graph->CurrentDriver()->GetCurrentTimeStamp();
+    MOZ_ASSERT(currentTime > mLastAppendTime,
+               "CurrentTime didn't advance since last append");
+    mLastAppendTime = currentTime;
+  }
+#endif
+
   size_t len = mSources.Length();
   for (size_t i = 0; i < len; i++) {
     if (!mSources[i]) {
       continue;
     }
 
     TimeStamp insertTime;
     // Make sure we include the stream and the track.
--- a/dom/media/webrtc/moz.build
+++ b/dom/media/webrtc/moz.build
@@ -35,16 +35,17 @@ if CONFIG['MOZ_WEBRTC']:
         'RTCIdentityProviderRegistrar.cpp',
     ]
     # MediaEngineWebRTC.cpp needs to be built separately.
     SOURCES += [
         'MediaEngineWebRTC.cpp',
     ]
     LOCAL_INCLUDES += [
         '/dom/base',
+        '/dom/media',
         '/media/libyuv/libyuv/include',
         '/media/webrtc/signaling/src/common',
         '/media/webrtc/signaling/src/common/browser_logging',
         '/media/webrtc/trunk',
     ]
 
 XPIDL_SOURCES += [
     'nsITabSource.idl'