--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -43,16 +43,17 @@ struct AutoProfilerUnregisterThread
}
};
GraphDriver::GraphDriver(MediaStreamGraphImpl* aGraphImpl)
: mIterationStart(0),
mIterationEnd(0),
mGraphImpl(aGraphImpl),
mWaitState(WAITSTATE_RUNNING),
+ mAudioInput(nullptr),
mCurrentTimeStamp(TimeStamp::Now()),
mPreviousDriver(nullptr),
mNextDriver(nullptr)
{ }
void GraphDriver::SetGraphTime(GraphDriver* aPreviousDriver,
GraphTime aLastSwitchNextIterationStart,
GraphTime aLastSwitchNextIterationEnd)
@@ -534,16 +535,17 @@ StreamAndPromiseForOperation::StreamAndP
// MOZ_ASSERT(aPromise);
}
AudioCallbackDriver::AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl)
: GraphDriver(aGraphImpl)
, mSampleRate(0)
, mIterationDurationMS(MEDIA_GRAPH_TARGET_PERIOD_MS)
, mStarted(false)
+ , mAudioInput(nullptr)
, mAudioChannel(aGraphImpl->AudioChannel())
, mInCallback(false)
, mMicrophoneActive(false)
#ifdef XP_MACOSX
, mCallbackReceivedWhileSwitching(0)
#endif
{
STREAM_LOG(LogLevel::Debug, ("AudioCallbackDriver ctor for graph %p", aGraphImpl));
@@ -895,18 +897,18 @@ AudioCallbackDriver::DataCallback(AudioD
mBuffer.BufferFilled();
// Callback any observers for the AEC speaker data. Note that one
// (maybe) of these will be full-duplex, the others will get their input
// data off separate cubeb callbacks. Take care with how stuff is
// removed/added to this list and TSAN issues, but input and output will
// use separate callback methods.
- mGraphImpl->NotifySpeakerData(aOutputBuffer, static_cast<size_t>(aFrames),
- ChannelCount);
+ mGraphImpl->NotifyOutputData(aOutputBuffer, static_cast<size_t>(aFrames),
+ ChannelCount);
// Process mic data if any/needed -- after inserting far-end data for AEC!
if (aInputBuffer) {
if (mAudioInput) { // for this specific input-only or full-duplex stream
mAudioInput->NotifyInputData(mGraphImpl, aInputBuffer,
static_cast<size_t>(aFrames),
ChannelCount);
}
--- a/dom/media/GraphDriver.h
+++ b/dom/media/GraphDriver.h
@@ -187,23 +187,22 @@ public:
void EnsureNextIterationLocked();
MediaStreamGraphImpl* GraphImpl() {
return mGraphImpl;
}
virtual bool OnThread() = 0;
- // XXX Thread-safety! Do these via commands to avoid TSAN issues
- // and crashes!!!
- virtual void SetInputListener(MediaStreamListener *aListener) {
+ // These are invoked on the MSG thread (or MainThread in shutdown)
+ virtual void SetInputListener(AudioDataListener *aListener) {
mAudioInput = aListener;
}
// XXX do we need the param? probably no
- virtual void RemoveInputListener(MediaStreamListener *aListener) {
+ virtual void RemoveInputListener(AudioDataListener *aListener) {
mAudioInput = nullptr;
}
protected:
GraphTime StateComputedTime() const;
// Time of the start of this graph iteration. This must be accessed while
// having the monitor.
@@ -228,17 +227,17 @@ protected:
// Something has signaled RunThread() to wake up immediately,
// but it hasn't done so yet
WAITSTATE_WAKING_UP
};
// This must be access with the monitor.
WaitState mWaitState;
// Callback for mic data, if any
- RefPtr<MediaStreamListener> mAudioInput;
+ RefPtr<AudioDataListener> mAudioInput;
// This is used on the main thread (during initialization), and the graph
// thread. No monitor needed because we know the graph thread does not run
// during the initialization.
TimeStamp mCurrentTimeStamp;
// This is non-null only when this driver has recently switched from an other
// driver, and has not cleaned it up yet (for example because the audio stream
// is currently calling the callback during initialization).
@@ -493,17 +492,17 @@ private:
* thread (if this driver is the first one).
* This is read on previous driver's thread (during callbacks from
* cubeb_stream_init) and the audio thread (when switching away from this
* driver back to a SystemClockDriver).
* This is synchronized by the Graph's monitor.
* */
bool mStarted;
/* Listener for mic input, if any. */
- RefPtr<MediaStreamListener> mAudioInput;
+  RefPtr<AudioDataListener> mAudioInput; // NOTE(review): shadows GraphDriver::mAudioInput; the base SetInputListener() sets only the base member — confirm AudioCallbackDriver overrides it, otherwise DataCallback's mAudioInput check never sees the listener
struct AutoInCallback
{
explicit AutoInCallback(AudioCallbackDriver* aDriver);
~AutoInCallback();
AudioCallbackDriver* mDriver;
};
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -919,91 +919,91 @@ MediaStreamGraphImpl::PlayVideo(MediaStr
// If the stream has finished and the timestamps of all frames have expired
// then no more updates are required.
if (aStream->mFinished && !haveMultipleImages) {
aStream->mLastPlayedVideoFrame.SetNull();
}
}
void
-MediaStreamGraphImpl::OpenAudioInputImpl(char *aName, MediaStreamListener *aListener)
+MediaStreamGraphImpl::OpenAudioInputImpl(char *aName, AudioDataListener *aListener)
{
if (CurrentDriver()->AsAudioCallbackDriver()) {
CurrentDriver()->SetInputListener(aListener);
} else {
// XXX Switch to callback driver
}
mAudioInputs.AppendElement(aListener); // always monitor speaker data
}
nsresult
-MediaStreamGraphImpl::OpenAudioInput(char *aName, MediaStreamListener *aListener)
+MediaStreamGraphImpl::OpenAudioInput(char *aName, AudioDataListener *aListener)
{
// XXX So, so, so annoying. Can't AppendMessage except on Mainthread
if (!NS_IsMainThread()) {
NS_DispatchToMainThread(WrapRunnable(this,
&MediaStreamGraphImpl::OpenAudioInput,
aName, aListener)); // XXX Fix! string need to copied
return NS_OK;
}
class Message : public ControlMessage {
public:
- Message(MediaStreamGraphImpl *aGraph, char *aName, MediaStreamListener *aListener) :
+ Message(MediaStreamGraphImpl *aGraph, char *aName, AudioDataListener *aListener) :
ControlMessage(nullptr), mGraph(aGraph), mName(aName), mListener(aListener) {}
virtual void Run()
{
mGraph->OpenAudioInputImpl(mName, mListener);
}
MediaStreamGraphImpl *mGraph;
char *mName; // XXX needs to copy
- MediaStreamListener *mListener;
+ RefPtr<AudioDataListener> mListener;
};
this->AppendMessage(new Message(this, aName, aListener));
return NS_OK;
}
void
-MediaStreamGraphImpl::CloseAudioInputImpl(MediaStreamListener *aListener)
+MediaStreamGraphImpl::CloseAudioInputImpl(AudioDataListener *aListener)
{
CurrentDriver()->RemoveInputListener(aListener);
mAudioInputs.RemoveElement(aListener);
}
void
-MediaStreamGraphImpl::CloseAudioInput(MediaStreamListener *aListener)
+MediaStreamGraphImpl::CloseAudioInput(AudioDataListener *aListener)
{
// XXX So, so, so annoying. Can't AppendMessage except on Mainthread
if (!NS_IsMainThread()) {
NS_DispatchToMainThread(WrapRunnable(this,
&MediaStreamGraphImpl::CloseAudioInput,
aListener));
return;
}
class Message : public ControlMessage {
public:
- Message(MediaStreamGraphImpl *aGraph, MediaStreamListener *aListener) :
+ Message(MediaStreamGraphImpl *aGraph, AudioDataListener *aListener) :
ControlMessage(nullptr), mGraph(aGraph), mListener(aListener) {}
virtual void Run()
{
mGraph->CloseAudioInputImpl(mListener);
}
MediaStreamGraphImpl *mGraph;
- MediaStreamListener *mListener;
+ RefPtr<AudioDataListener> mListener;
};
this->AppendMessage(new Message(this, aListener));
}
// All AudioInput listeners get the same speaker data (at least for now).
void
-MediaStreamGraph::NotifySpeakerData(AudioDataValue* aBuffer, size_t aFrames,
- uint32_t aChannels)
+MediaStreamGraph::NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
+ uint32_t aChannels)
{
for (auto& listener : mAudioInputs) {
- listener->NotifySpeakerData(this, aBuffer, aFrames, aChannels);
+ listener->NotifyOutputData(this, aBuffer, aFrames, aChannels);
}
}
bool
MediaStreamGraphImpl::ShouldUpdateMainThread()
{
if (mRealtime) {
return true;
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -176,33 +176,49 @@ public:
TrackID aInputTrackID = TRACK_INVALID) {}
/**
* Notify that all new tracks this iteration have been created.
* This is to ensure that tracks added atomically to MediaStreamGraph
* are also notified of atomically to MediaStreamListeners.
*/
virtual void NotifyFinishedTrackCreation(MediaStreamGraph* aGraph) {}
+};
+class AudioDataListenerInterface {
+protected:
+ // Protected destructor, to discourage deletion outside of Release():
+ virtual ~AudioDataListenerInterface() {}
+
+public:
/* These are for cubeb audio input & output streams: */
/**
* Output data to speakers, for use as the "far-end" data for echo
* cancellation. This is not guaranteed to be in any particular size
* chunks.
*/
- virtual void NotifySpeakerData(MediaStreamGraph* aGraph,
- AudioDataValue* aBuffer, size_t aFrames,
- uint32_t aChannels) {}
+ virtual void NotifyOutputData(MediaStreamGraph* aGraph,
+ AudioDataValue* aBuffer, size_t aFrames,
+ uint32_t aChannels) = 0;
/**
* Input data from a microphone (or other audio source. This is not
* guaranteed to be in any particular size chunks.
*/
virtual void NotifyInputData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
- uint32_t aChannels) {}
+ uint32_t aChannels) = 0;
+};
+
+class AudioDataListener : public AudioDataListenerInterface {
+protected:
+ // Protected destructor, to discourage deletion outside of Release():
+ virtual ~AudioDataListener() {}
+
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioDataListener)
};
/**
* This is a base class for media graph thread listener direct callbacks
* from within AppendToTrack(). Note that your regular listener will
* still get NotifyQueuedTrackChanges() callbacks from the MSG thread, so
* you must be careful to ignore them if AddDirectListener was successful.
*/
@@ -1187,20 +1203,20 @@ public:
};
// Main thread only
static MediaStreamGraph* GetInstance(GraphDriverType aGraphDriverRequested,
dom::AudioChannel aChannel);
static MediaStreamGraph* CreateNonRealtimeInstance(TrackRate aSampleRate);
// Idempotent
static void DestroyNonRealtimeInstance(MediaStreamGraph* aGraph);
- virtual nsresult OpenAudioInput(char *aName, MediaStreamListener *aListener) {
+ virtual nsresult OpenAudioInput(char *aName, AudioDataListener *aListener) {
return NS_ERROR_FAILURE;
}
- virtual void CloseAudioInput(MediaStreamListener *aListener) {}
+ virtual void CloseAudioInput(AudioDataListener *aListener) {}
// Control API.
/**
* Create a stream that a media decoder (or some other source of
* media data, such as a camera) can write to.
*/
SourceMediaStream* CreateSourceStream(DOMMediaStream* aWrapper);
/**
@@ -1299,14 +1315,14 @@ protected:
/**
* Sample rate at which this graph runs. For real time graphs, this is
* the rate of the audio mixer. For offline graphs, this is the rate specified
* at construction.
*/
TrackRate mSampleRate;
- nsTArray<RefPtr<MediaStreamListener>> mAudioInputs;
+ nsTArray<RefPtr<AudioDataListener>> mAudioInputs;
};
} // namespace mozilla
#endif /* MOZILLA_MEDIASTREAMGRAPH_H_ */
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -345,20 +345,20 @@ public:
* Set the correct current video frame for stream aStream.
*/
void PlayVideo(MediaStream* aStream);
/**
* No more data will be forthcoming for aStream. The stream will end
* at the current buffer end point. The StreamBuffer's tracks must be
* explicitly set to finished by the caller.
*/
- void OpenAudioInputImpl(char *aName, MediaStreamListener *aListener);
- virtual nsresult OpenAudioInput(char *aName, MediaStreamListener *aListener) override;
- void CloseAudioInputImpl(MediaStreamListener *aListener);
- virtual void CloseAudioInput(MediaStreamListener *aListener) override;
+ void OpenAudioInputImpl(char *aName, AudioDataListener *aListener);
+ virtual nsresult OpenAudioInput(char *aName, AudioDataListener *aListener) override;
+ void CloseAudioInputImpl(AudioDataListener *aListener);
+ virtual void CloseAudioInput(AudioDataListener *aListener) override;
void FinishStream(MediaStream* aStream);
/**
* Compute how much stream data we would like to buffer for aStream.
*/
StreamTime GetDesiredBufferEnd(MediaStream* aStream);
/**
* Returns true when there are no active streams.
--- a/dom/media/webrtc/MediaEngine.h
+++ b/dom/media/webrtc/MediaEngine.h
@@ -261,17 +261,18 @@ protected:
: MediaEngineSource(aState) {}
MediaEngineVideoSource()
: MediaEngineSource(kReleased) {}
};
/**
* Audio source and friends.
*/
-class MediaEngineAudioSource : public MediaEngineSource
+class MediaEngineAudioSource : public MediaEngineSource,
+ public AudioDataListenerInterface
{
public:
virtual ~MediaEngineAudioSource() {}
protected:
explicit MediaEngineAudioSource(MediaEngineState aState)
: MediaEngineSource(aState) {}
MediaEngineAudioSource()
--- a/dom/media/webrtc/MediaEngineDefault.h
+++ b/dom/media/webrtc/MediaEngineDefault.h
@@ -139,16 +139,24 @@ public:
#ifdef DEBUG
StreamBuffer::Track* data = aSource->FindTrack(aId);
NS_WARN_IF_FALSE(!data || data->IsEnded() ||
aDesiredTime <= aSource->GetEndOfAppendedData(aId),
"MediaEngineDefaultAudioSource data underrun");
#endif
}
+ void NotifyOutputData(MediaStreamGraph* aGraph,
+ AudioDataValue* aBuffer, size_t aFrames,
+ uint32_t aChannels) override
+ {}
+ void NotifyInputData(MediaStreamGraph* aGraph,
+ AudioDataValue* aBuffer, size_t aFrames,
+ uint32_t aChannels) override
+ {}
virtual bool IsFake() override {
return true;
}
virtual dom::MediaSourceEnum GetMediaSource() const override {
return dom::MediaSourceEnum::Microphone;
}
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -24,16 +24,17 @@
#include "VideoUtils.h"
#include "MediaEngineCameraVideoSource.h"
#include "VideoSegment.h"
#include "AudioSegment.h"
#include "StreamBuffer.h"
#include "MediaStreamGraph.h"
#include "cubeb/cubeb.h"
#include "CubebUtils.h"
+#include "AudioPacketizer.h"
#include "MediaEngineWrapper.h"
#include "mozilla/dom/MediaStreamTrackBinding.h"
// WebRTC library includes follow
#include "webrtc/common.h"
// Audio Engine
#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_codec.h"
@@ -93,16 +94,24 @@ public:
void SetDirectListeners(bool aDirect) override
{}
nsresult Config(bool aEchoOn, uint32_t aEcho, bool aAgcOn,
uint32_t aAGC, bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay) override
{
return NS_OK;
}
+ void NotifyOutputData(MediaStreamGraph* aGraph,
+ AudioDataValue* aBuffer, size_t aFrames,
+ uint32_t aChannels) override
+ {}
+ void NotifyInputData(MediaStreamGraph* aGraph,
+ AudioDataValue* aBuffer, size_t aFrames,
+ uint32_t aChannels) override
+ {}
void NotifyPull(MediaStreamGraph* aGraph, SourceMediaStream* aSource,
TrackID aID, StreamTime aDesiredTime) override
{}
dom::MediaSourceEnum GetMediaSource() const override
{
return dom::MediaSourceEnum::AudioCapture;
}
bool IsFake() override
@@ -130,26 +139,25 @@ public:
virtual ~AudioInput() {}
NS_INLINE_DECL_REFCOUNTING(AudioInput)
virtual int GetNumOfRecordingDevices(int& aDevices) = 0;
virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
char aStrGuidUTF8[128]) = 0;
virtual int GetRecordingDeviceStatus(bool& aIsAvailable) = 0;
- virtual void StartRecording(MediaStreamGraph *aGraph) = 0;
- virtual void StopRecording(MediaStreamGraph *aGraph) = 0;
+ virtual void StartRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) = 0;
+ virtual void StopRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) = 0;
virtual int SetRecordingDevice(int aIndex) = 0;
protected:
webrtc::VoiceEngine* mVoiceEngine;
};
-class AudioInputCubeb : public AudioInput,
- public MediaStreamListener
+class AudioInputCubeb : public AudioInput
{
public:
AudioInputCubeb(webrtc::VoiceEngine* aVoiceEngine) :
AudioInput(aVoiceEngine), mDevices(nullptr) {}
virtual ~AudioInputCubeb()
{
if (mDevices) {
cubeb_device_collection_destroy(mDevices);
@@ -192,34 +200,34 @@ public:
virtual int GetRecordingDeviceStatus(bool& aIsAvailable)
{
// With cubeb, we only expose devices of type CUBEB_DEVICE_TYPE_INPUT
aIsAvailable = true;
return 0;
}
- virtual void StartRecording(MediaStreamGraph *aGraph)
+ virtual void StartRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener)
{
ScopedCustomReleasePtr<webrtc::VoEExternalMedia> ptrVoERender;
ptrVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
if (ptrVoERender) {
ptrVoERender->SetExternalRecordingStatus(true);
}
- aGraph->OpenAudioInput(nullptr, this);
+ aGraph->OpenAudioInput(nullptr, aListener);
}
- virtual void StopRecording(MediaStreamGraph *aGraph)
+ virtual void StopRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener)
{
- aGraph->CloseAudioInput(this);
+ aGraph->CloseAudioInput(aListener);
}
virtual int SetRecordingDevice(int aIndex)
{
- // Not relevant to cubeb
+    // Not used by cubeb yet; becomes relevant once device-ID (devid) selection is supported
return 1;
}
private:
cubeb_device_collection* mDevices;
};
class AudioInputWebRTC : public AudioInput
@@ -256,30 +264,59 @@ public:
ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
if (!ptrVoEHw) {
return 1;
}
ptrVoEHw->GetRecordingDeviceStatus(aIsAvailable);
return 0;
}
- virtual void StartRecording(MediaStreamGraph *aGraph) {}
- virtual void StopRecording(MediaStreamGraph *aGraph) {}
+ virtual void StartRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) {}
+ virtual void StopRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) {}
virtual int SetRecordingDevice(int aIndex)
{
ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
if (!ptrVoEHw) {
return 1;
}
return ptrVoEHw->SetRecordingDevice(aIndex);
}
};
+class WebRTCAudioDataListener : public AudioDataListener
+{
+protected:
+ // Protected destructor, to discourage deletion outside of Release():
+ virtual ~WebRTCAudioDataListener() {}
+
+public:
+ WebRTCAudioDataListener(MediaEngineAudioSource* aAudioSource) :
+ mAudioSource(aAudioSource)
+ {}
+
+ // AudioDataListenerInterface methods
+ virtual void NotifyOutputData(MediaStreamGraph* aGraph,
+ AudioDataValue* aBuffer, size_t aFrames,
+ uint32_t aChannels) override
+ {
+ mAudioSource->NotifyOutputData(aGraph, aBuffer, aFrames, aChannels);
+ }
+ virtual void NotifyInputData(MediaStreamGraph* aGraph,
+ AudioDataValue* aBuffer, size_t aFrames,
+ uint32_t aChannels) override
+ {
+ mAudioSource->NotifyInputData(aGraph, aBuffer, aFrames, aChannels);
+ }
+
+private:
+ RefPtr<MediaEngineAudioSource> mAudioSource;
+};
+
class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
public webrtc::VoEMediaProcess,
private MediaConstraintsHelper
{
public:
MediaEngineWebRTCMicrophoneSource(nsIThread* aThread,
webrtc::VoiceEngine* aVoiceEnginePtr,
mozilla::AudioInput* aAudioInput,
@@ -302,17 +339,18 @@ public:
, mAGC(webrtc::kAgcDefault)
, mNoiseSuppress(webrtc::kNsDefault)
, mPlayoutDelay(0)
, mNullTransport(nullptr) {
MOZ_ASSERT(aVoiceEnginePtr);
MOZ_ASSERT(aAudioInput);
mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
mDeviceUUID.Assign(uuid);
- Init();
+    mListener = new mozilla::WebRTCAudioDataListener(this); // NOTE(review): listener holds a RefPtr back to this source, forming a reference cycle — confirm it is explicitly broken (e.g. in Shutdown())
+ Init();
}
virtual void GetName(nsAString& aName) override;
virtual void GetUUID(nsACString& aUUID) override;
virtual nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
const MediaEnginePrefs& aPrefs,
const nsString& aDeviceId) override;
@@ -328,16 +366,24 @@ public:
bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay) override;
virtual void NotifyPull(MediaStreamGraph* aGraph,
SourceMediaStream* aSource,
TrackID aId,
StreamTime aDesiredTime) override;
+ // AudioDataListenerInterface methods
+ virtual void NotifyOutputData(MediaStreamGraph* aGraph,
+ AudioDataValue* aBuffer, size_t aFrames,
+ uint32_t aChannels) override;
+ virtual void NotifyInputData(MediaStreamGraph* aGraph,
+ AudioDataValue* aBuffer, size_t aFrames,
+ uint32_t aChannels) override;
+
virtual bool IsFake() override {
return false;
}
virtual dom::MediaSourceEnum GetMediaSource() const override {
return dom::MediaSourceEnum::Microphone;
}
@@ -362,22 +408,25 @@ public:
protected:
~MediaEngineWebRTCMicrophoneSource() { Shutdown(); }
private:
void Init();
webrtc::VoiceEngine* mVoiceEngine;
RefPtr<mozilla::AudioInput> mAudioInput;
+ RefPtr<WebRTCAudioDataListener> mListener;
ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
+ nsAutoPtr<AudioPacketizer<AudioDataValue, int16_t>> mPacketizer;
+
// mMonitor protects mSources[] access/changes, and transitions of mState
// from kStarted to kStopped (which are combined with EndTrack()).
// mSources[] is accessed from webrtc threads.
Monitor mMonitor;
nsTArray<RefPtr<SourceMediaStream>> mSources;
nsCOMPtr<nsIThread> mThread;
int mCapIndex;
int mChannel;
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -376,17 +376,17 @@ MediaEngineWebRTCMicrophoneSource::Start
}
if (mVoEBase->StartSend(mChannel)) {
return NS_ERROR_FAILURE;
}
// Attach external media processor, so this::Process will be called.
mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);
- mAudioInput->StartRecording(aStream->Graph());
+ mAudioInput->StartRecording(aStream->Graph(), mListener);
return NS_OK;
}
nsresult
MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
{
AssertIsOnOwningThread();
@@ -408,17 +408,17 @@ MediaEngineWebRTCMicrophoneSource::Stop(
}
if (!mVoEBase) {
return NS_ERROR_FAILURE;
}
mState = kStopped;
}
- mAudioInput->StopRecording(aSource->Graph());
+ mAudioInput->StopRecording(aSource->Graph(), mListener);
mVoERender->DeRegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel);
if (mVoEBase->StopSend(mChannel)) {
return NS_ERROR_FAILURE;
}
if (mVoEBase->StopReceive(mChannel)) {
return NS_ERROR_FAILURE;
@@ -440,16 +440,49 @@ MediaEngineWebRTCMicrophoneSource::Notif
TrackID aID,
StreamTime aDesiredTime)
{
// Ignore - we push audio data
LOG_FRAMES(("NotifyPull, desired = %ld", (int64_t) aDesiredTime));
}
void
+MediaEngineWebRTCMicrophoneSource::NotifyOutputData(MediaStreamGraph* aGraph,
+ AudioDataValue* aBuffer,
+ size_t aFrames,
+ uint32_t aChannels)
+{
+}
+
+// Called back on GraphDriver thread
+void
+MediaEngineWebRTCMicrophoneSource::NotifyInputData(MediaStreamGraph* aGraph,
+ AudioDataValue* aBuffer,
+ size_t aFrames,
+ uint32_t aChannels)
+{
+ // This will call Process() with data coming out of the AEC/NS/AGC/etc chain
+ if (!mPacketizer ||
+ mPacketizer->PacketSize() != mSampleFrequency/100 ||
+ mPacketizer->Channels() != aChannels) {
+ // It's ok to drop the audio still in the packetizer here.
+ mPacketizer = new AudioPacketizer<AudioDataValue, int16_t>(mSampleFrequency/100, aChannels);
+ }
+
+ mPacketizer->Input(aBuffer, static_cast<uint32_t>(aFrames));
+
+ while (mPacketizer->PacketsAvailable()) {
+ uint32_t samplesPerPacket = mPacketizer->PacketSize() *
+ mPacketizer->Channels();
+    int16_t* packet = mPacketizer->Output(); // NOTE(review): confirm Output()'s buffer ownership — if it allocates a fresh buffer per call, this loop leaks one buffer per packet
+ mVoERender->ExternalRecordingInsertData(packet, samplesPerPacket, mSampleFrequency, 0);
+ }
+}
+
+void
MediaEngineWebRTCMicrophoneSource::Init()
{
mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
mVoEBase->Init();
mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
if (!mVoERender) {