Bug 1397793 - Remove VoEExternalMedia usage in MediaEngineWebRTCAudio and MediaEngineWebRTC. r?pehrsons
This patch does not build on its own and requires the subsequent patches in the series, but is split out to ease review.
A side effect of this patch is that it drops support for the non-duplex
(AudioInputWebRTC) capture path, which makes the whole init/cleanup phase much simpler.
MozReview-Commit-ID: Caqc8v7CWwZ
--- a/dom/media/webrtc/MediaEngine.h
+++ b/dom/media/webrtc/MediaEngine.h
@@ -48,18 +48,16 @@ public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaEngine)
static const int DEFAULT_VIDEO_FPS = 30;
static const int DEFAULT_43_VIDEO_WIDTH = 640;
static const int DEFAULT_43_VIDEO_HEIGHT = 480;
static const int DEFAULT_169_VIDEO_WIDTH = 1280;
static const int DEFAULT_169_VIDEO_HEIGHT = 720;
- static const int DEFAULT_SAMPLE_RATE = 32000;
-
// This allows using whatever rate the graph is using for the
// MediaStreamTrack. This is useful for microphone data, we know it's already
// at the correct rate for insertion in the MSG.
static const int USE_GRAPH_RATE = -1;
/* Populate an array of video sources in the nsTArray. Also include devices
* that are currently unavailable. */
virtual void EnumerateVideoDevices(dom::MediaSourceEnum,
--- a/dom/media/webrtc/MediaEngineWebRTC.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTC.cpp
@@ -102,17 +102,16 @@ void AudioInputCubeb::UpdateDeviceList()
StaticMutexAutoLock lock(sMutex);
// swap state
cubeb_device_collection_destroy(cubebContext, &mDevices);
mDevices = devices;
}
MediaEngineWebRTC::MediaEngineWebRTC(MediaEnginePrefs &aPrefs)
: mMutex("mozilla::MediaEngineWebRTC"),
- mVoiceEngine(nullptr),
mAudioInput(nullptr),
mFullDuplex(aPrefs.mFullDuplex),
mDelayAgnostic(aPrefs.mDelayAgnostic),
mExtendedFilter(aPrefs.mExtendedFilter),
mHasTabVideoSource(false)
{
nsCOMPtr<nsIComponentRegistrar> compMgr;
NS_GetComponentRegistrar(getter_AddRefs(compMgr));
@@ -275,53 +274,21 @@ MediaEngineWebRTC::EnumerateAudioDevices
if (aMediaSource == dom::MediaSourceEnum::AudioCapture) {
RefPtr<MediaEngineWebRTCAudioCaptureSource> audioCaptureSource =
new MediaEngineWebRTCAudioCaptureSource(nullptr);
aASources->AppendElement(audioCaptureSource);
return;
}
-#ifdef MOZ_WIDGET_ANDROID
- JavaVM* jvm = mozilla::jni::GetVM();
- jobject context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef();
-
- if (webrtc::VoiceEngine::SetAndroidObjects(jvm, (void*)context) != 0) {
- LOG(("VoiceEngine:SetAndroidObjects Failed"));
- return;
- }
-#endif
-
- if (!mVoiceEngine) {
- mVoiceEngine = webrtc::VoiceEngine::Create();
- if (!mVoiceEngine) {
+ if (!mAudioInput) {
+ if (!SupportsDuplex()) {
return;
}
- }
-
- ptrVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
- if (!ptrVoEBase) {
- return;
- }
-
- // Always re-init the voice engine, since if we close the last use we
- // DeInitEngine() and Terminate(), which shuts down Process() - but means
- // we have to Init() again before using it. Init() when already inited is
- // just a no-op, so call always.
- if (ptrVoEBase->Init() < 0) {
- return;
- }
-
- if (!mAudioInput) {
- if (SupportsDuplex()) {
- // The platform_supports_full_duplex.
- mAudioInput = new mozilla::AudioInputCubeb(mVoiceEngine);
- } else {
- mAudioInput = new mozilla::AudioInputWebRTC(mVoiceEngine);
- }
+ mAudioInput = new mozilla::AudioInputCubeb();
}
int nDevices = 0;
mAudioInput->GetNumOfRecordingDevices(nDevices);
int i;
#if defined(MOZ_WIDGET_ANDROID)
i = 0; // Bug 1037025 - let the OS handle defaulting for now on android/b2g
#else
@@ -339,36 +306,26 @@ MediaEngineWebRTC::EnumerateAudioDevices
int error = mAudioInput->GetRecordingDeviceName(i, deviceName, uniqueId);
if (error) {
LOG((" AudioInput::GetRecordingDeviceName: Failed %d", error));
continue;
}
if (uniqueId[0] == '\0') {
// Mac and Linux don't set uniqueId!
- MOZ_ASSERT(sizeof(deviceName) == sizeof(uniqueId)); // total paranoia
strcpy(uniqueId, deviceName); // safe given assert and initialization/error-check
}
RefPtr<MediaEngineAudioSource> aSource;
NS_ConvertUTF8toUTF16 uuid(uniqueId);
if (mAudioSources.Get(uuid, getter_AddRefs(aSource))) {
// We've already seen this device, just append.
aASources->AppendElement(aSource.get());
} else {
- AudioInput* audioinput = mAudioInput;
- if (SupportsDuplex()) {
- // The platform_supports_full_duplex.
-
- // For cubeb, it has state (the selected ID)
- // XXX just use the uniqueID for cubeb and support it everywhere, and get rid of this
- // XXX Small window where the device list/index could change!
- audioinput = new mozilla::AudioInputCubeb(mVoiceEngine, i);
- }
- aSource = new MediaEngineWebRTCMicrophoneSource(mVoiceEngine, audioinput,
+ aSource = new MediaEngineWebRTCMicrophoneSource(new mozilla::AudioInputCubeb(i),
i, deviceName, uniqueId,
mDelayAgnostic, mExtendedFilter);
mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
aASources->AppendElement(aSource);
}
}
}
@@ -396,20 +353,13 @@ MediaEngineWebRTC::Shutdown()
MediaEngineAudioSource* source = iter.UserData();
if (source) {
source->Shutdown();
}
}
mVideoSources.Clear();
mAudioSources.Clear();
- if (mVoiceEngine) {
- mVoiceEngine->SetTraceCallback(nullptr);
- webrtc::VoiceEngine::Delete(mVoiceEngine);
- }
-
- mVoiceEngine = nullptr;
-
mozilla::camera::Shutdown();
AudioInputCubeb::CleanupGlobalData();
}
}
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -136,17 +136,17 @@ protected:
virtual ~MediaEngineWebRTCAudioCaptureSource() {}
nsCString mUUID;
};
// Small subset of VoEHardware
class AudioInput
{
public:
- explicit AudioInput(webrtc::VoiceEngine* aVoiceEngine) : mVoiceEngine(aVoiceEngine) {};
+ AudioInput() = default;
// Threadsafe because it's referenced from an MicrophoneSource, which can
// had references to it on other threads.
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioInput)
virtual int GetNumOfRecordingDevices(int& aDevices) = 0;
virtual int GetRecordingDeviceName(int aIndex, char (&aStrNameUTF8)[128],
char aStrGuidUTF8[128]) = 0;
virtual int GetRecordingDeviceStatus(bool& aIsAvailable) = 0;
@@ -155,25 +155,23 @@ public:
virtual void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) = 0;
virtual void StopRecording(SourceMediaStream *aStream) = 0;
virtual int SetRecordingDevice(int aIndex) = 0;
virtual void SetUserChannelCount(uint32_t aChannels) = 0;
protected:
// Protected destructor, to discourage deletion outside of Release():
virtual ~AudioInput() {}
-
- webrtc::VoiceEngine* mVoiceEngine;
};
class AudioInputCubeb final : public AudioInput
{
public:
- explicit AudioInputCubeb(webrtc::VoiceEngine* aVoiceEngine, int aIndex = 0) :
- AudioInput(aVoiceEngine), mSelectedDevice(aIndex), mInUseCount(0)
+ explicit AudioInputCubeb(int aIndex = 0) :
+ AudioInput(), mSelectedDevice(aIndex), mInUseCount(0)
{
if (!mDeviceIndexes) {
mDeviceIndexes = new nsTArray<int>;
mDeviceNames = new nsTArray<nsCString>;
mDefaultDevice = -1;
}
}
@@ -309,24 +307,17 @@ public:
{
#ifdef MOZ_WIDGET_ANDROID
// OpenSL ES does not support enumerating devices.
MOZ_ASSERT(mDevices.count == 0);
#else
MOZ_ASSERT(mDevices.count > 0);
#endif
- if (mInUseCount == 0) {
- ScopedCustomReleasePtr<webrtc::VoEExternalMedia> ptrVoEXMedia;
- ptrVoEXMedia = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
- if (ptrVoEXMedia) {
- ptrVoEXMedia->SetExternalRecordingStatus(true);
- }
- mAnyInUse = true;
- }
+ mAnyInUse = true;
mInUseCount++;
// Always tell the stream we're using it for input
aStream->OpenAudioInput(mSelectedDevice, aListener);
}
void StopRecording(SourceMediaStream *aStream)
{
aStream->CloseAudioInput();
@@ -364,87 +355,16 @@ private:
static int mDefaultDevice; // -1 == not set
static nsTArray<nsCString>* mDeviceNames;
static cubeb_device_collection mDevices;
static bool mAnyInUse;
static StaticMutex sMutex;
static uint32_t sUserChannelCount;
};
-class AudioInputWebRTC final : public AudioInput
-{
-public:
- explicit AudioInputWebRTC(webrtc::VoiceEngine* aVoiceEngine) : AudioInput(aVoiceEngine) {}
-
- int GetNumOfRecordingDevices(int& aDevices)
- {
- ScopedCustomReleasePtr<webrtc::VoEBase> ptrVoEBase;
- ptrVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
- if (!ptrVoEBase) {
- return 1;
- }
- aDevices = ptrVoEBase->audio_device_module()->RecordingDevices();
- return 0;
- }
-
- int GetRecordingDeviceName(int aIndex, char (&aStrNameUTF8)[128],
- char aStrGuidUTF8[128])
- {
- ScopedCustomReleasePtr<webrtc::VoEBase> ptrVoEBase;
- ptrVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
- if (!ptrVoEBase) {
- return 1;
- }
- return ptrVoEBase->audio_device_module()->RecordingDeviceName(aIndex,
- aStrNameUTF8,
- aStrGuidUTF8);
- }
-
- int GetRecordingDeviceStatus(bool& aIsAvailable)
- {
- ScopedCustomReleasePtr<webrtc::VoEBase> ptrVoEBase;
- ptrVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
- if (!ptrVoEBase) {
- return 1;
- }
- return ptrVoEBase->audio_device_module()->RecordingIsAvailable(&aIsAvailable);
- }
-
- void GetChannelCount(uint32_t& aChannels)
- {
- aChannels = 1; // default to mono
- }
-
- int GetMaxAvailableChannels(uint32_t& aChannels)
- {
- aChannels = 1;
- return 0;
- }
-
- void SetUserChannelCount(uint32_t aChannels)
- {}
-
- void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) {}
- void StopRecording(SourceMediaStream *aStream) {}
-
- int SetRecordingDevice(int aIndex)
- {
- ScopedCustomReleasePtr<webrtc::VoEBase> ptrVoEBase;
- ptrVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
- if (!ptrVoEBase) {
- return 1;
- }
- return ptrVoEBase->audio_device_module()->SetRecordingDevice(aIndex);
- }
-
-protected:
- // Protected destructor, to discourage deletion outside of Release():
- ~AudioInputWebRTC() {}
-};
-
class WebRTCAudioDataListener : public AudioDataListener
{
protected:
// Protected destructor, to discourage deletion outside of Release():
virtual ~WebRTCAudioDataListener() {}
public:
explicit WebRTCAudioDataListener(MediaEngineAudioSource* aAudioSource)
@@ -485,23 +405,21 @@ public:
mAudioSource = nullptr;
}
private:
Mutex mMutex;
RefPtr<MediaEngineAudioSource> mAudioSource;
};
-class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
- public webrtc::VoEMediaProcess
+class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource
{
typedef MediaEngineAudioSource Super;
public:
- MediaEngineWebRTCMicrophoneSource(webrtc::VoiceEngine* aVoiceEnginePtr,
- mozilla::AudioInput* aAudioInput,
+ MediaEngineWebRTCMicrophoneSource(mozilla::AudioInput* aAudioInput,
int aIndex,
const char* name,
const char* uuid,
bool aDelayAgnostic,
bool aExtendedFilter);
void GetName(nsAString& aName) const override;
void GetUUID(nsACString& aUUID) const override;
@@ -545,21 +463,16 @@ public:
{
return NS_ERROR_NOT_IMPLEMENTED;
}
uint32_t GetBestFitnessDistance(
const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
const nsString& aDeviceId) const override;
- // VoEMediaProcess.
- virtual void Process(int channel, webrtc::ProcessingTypes type,
- int16_t audio10ms[], size_t length,
- int samplingFreq, bool isStereo) override;
-
void Shutdown() override;
NS_DECL_THREADSAFE_ISUPPORTS
protected:
~MediaEngineWebRTCMicrophoneSource() {}
private:
@@ -571,19 +484,16 @@ private:
const nsString& aDeviceId,
const char** aOutBadConstraint) override;
void SetLastPrefs(const MediaEnginePrefs& aPrefs);
// These allocate/configure and release the channel
bool AllocChannel();
void FreeChannel();
- // These start/stop VoEBase and associated interfaces
- bool InitEngine();
- void DeInitEngine();
// This is true when all processing is disabled, we can skip
// packetization, resampling and other processing passes.
bool PassThrough() {
return mSkipProcessing;
}
template<typename T>
void InsertInGraph(const T* aBuffer,
@@ -591,59 +501,51 @@ private:
uint32_t aChannels);
void PacketizeAndProcess(MediaStreamGraph* aGraph,
const AudioDataValue* aBuffer,
size_t aFrames,
TrackRate aRate,
uint32_t aChannels);
- webrtc::VoiceEngine* mVoiceEngine;
RefPtr<mozilla::AudioInput> mAudioInput;
RefPtr<WebRTCAudioDataListener> mListener;
RefPtr<AudioOutputObserver> mAudioOutputObserver;
- // Note: shared across all microphone sources - we don't want to Terminate()
- // the VoEBase until there are no active captures
+ // Note: shared across all microphone sources
static int sChannelsOpen;
- static ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
- static ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
- static ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
- static ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
// accessed from the GraphDriver thread except for deletion
nsAutoPtr<AudioPacketizer<AudioDataValue, int16_t>> mPacketizer;
ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERenderListener;
// mMonitor protects mSources[] and mPrinicpalIds[] access/changes, and
// transitions of mState from kStarted to kStopped (which are combined with
// EndTrack()). mSources[] and mPrincipalHandles[] are accessed from webrtc
// threads.
Monitor mMonitor;
nsTArray<RefPtr<SourceMediaStream>> mSources;
nsTArray<PrincipalHandle> mPrincipalHandles; // Maps to mSources.
int mCapIndex;
- int mChannel;
bool mDelayAgnostic;
bool mExtendedFilter;
MOZ_INIT_OUTSIDE_CTOR TrackID mTrackID;
bool mStarted;
nsString mDeviceName;
nsCString mDeviceUUID;
int32_t mSampleFrequency;
uint64_t mTotalFrames;
uint64_t mLastLogFrames;
NullTransport *mNullTransport;
- nsTArray<int16_t> mInputBuffer;
// mSkipProcessing is true if none of the processing passes are enabled,
// because of prefs or constraints. This allows simply copying the audio into
// the MSG, skipping resampling and the whole webrtc.org code.
bool mSkipProcessing;
// To only update microphone when needed, we keep track of previous settings.
MediaEnginePrefs mLastPrefs;
@@ -671,17 +573,16 @@ public:
nsTArray<RefPtr<MediaEngineAudioSource>>*) override;
private:
~MediaEngineWebRTC() {}
nsCOMPtr<nsIThread> mThread;
// gUM runnables can e.g. Enumerate from multiple threads
Mutex mMutex;
- webrtc::VoiceEngine* mVoiceEngine;
RefPtr<mozilla::AudioInput> mAudioInput;
bool mFullDuplex;
bool mDelayAgnostic;
bool mExtendedFilter;
bool mHasTabVideoSource;
// Store devices we've already seen in a hashtable for quick return.
// Maps UUID to MediaEngineSource (one set for audio, one for video).
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -48,20 +48,16 @@ LogModule* AudioLogModule() {
/**
* Webrtc microphone source source.
*/
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCMicrophoneSource)
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioCaptureSource)
int MediaEngineWebRTCMicrophoneSource::sChannelsOpen = 0;
-ScopedCustomReleasePtr<webrtc::VoEBase> MediaEngineWebRTCMicrophoneSource::mVoEBase;
-ScopedCustomReleasePtr<webrtc::VoEExternalMedia> MediaEngineWebRTCMicrophoneSource::mVoERender;
-ScopedCustomReleasePtr<webrtc::VoENetwork> MediaEngineWebRTCMicrophoneSource::mVoENetwork;
-ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> MediaEngineWebRTCMicrophoneSource::mVoEProcessing;
AudioOutputObserver::AudioOutputObserver()
: mPlayoutFreq(0)
, mPlayoutChannels(0)
, mChunkSize(0)
, mSaved(nullptr)
, mSamplesSaved(0)
, mDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100)
@@ -182,41 +178,36 @@ AudioOutputObserver::InsertFarEnd(const
mSaved = nullptr;
mSamplesSaved = 0;
}
}
}
}
MediaEngineWebRTCMicrophoneSource::MediaEngineWebRTCMicrophoneSource(
- webrtc::VoiceEngine* aVoiceEnginePtr,
mozilla::AudioInput* aAudioInput,
int aIndex,
const char* name,
const char* uuid,
bool aDelayAgnostic,
bool aExtendedFilter)
: MediaEngineAudioSource(kReleased)
- , mVoiceEngine(aVoiceEnginePtr)
, mAudioInput(aAudioInput)
, mMonitor("WebRTCMic.Monitor")
, mCapIndex(aIndex)
- , mChannel(-1)
, mDelayAgnostic(aDelayAgnostic)
, mExtendedFilter(aExtendedFilter)
, mTrackID(TRACK_NONE)
, mStarted(false)
, mSampleFrequency(MediaEngine::DEFAULT_SAMPLE_RATE)
, mTotalFrames(0)
, mLastLogFrames(0)
- , mNullTransport(nullptr)
, mSkipProcessing(false)
, mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100)
{
- MOZ_ASSERT(aVoiceEnginePtr);
MOZ_ASSERT(aAudioInput);
mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
mDeviceUUID.Assign(uuid);
mListener = new mozilla::WebRTCAudioDataListener(this);
mSettings->mEchoCancellation.Construct(0);
mSettings->mAutoGainControl.Construct(0);
mSettings->mNoiseSuppression.Construct(0);
mSettings->mChannelCount.Construct(0);
@@ -313,22 +304,17 @@ MediaEngineWebRTCMicrophoneSource::Updat
prefs.mAecOn ? prefs.mAec : -1,
prefs.mAgcOn ? prefs.mAgc : -1,
prefs.mNoiseOn ? prefs.mNoise : -1,
prefs.mChannels));
switch (mState) {
case kReleased:
MOZ_ASSERT(aHandle);
- if (sChannelsOpen == 0) {
- if (!InitEngine()) {
- LOG(("Audio engine is not initalized"));
- return NS_ERROR_FAILURE;
- }
- } else {
+ if (sChannelsOpen != 0) {
// Until we fix (or wallpaper) support for multiple mic input
// (Bug 1238038) fail allocation for a second device
return NS_ERROR_FAILURE;
}
if (mAudioInput->SetRecordingDevice(mCapIndex)) {
return NS_ERROR_FAILURE;
}
mAudioInput->SetUserChannelCount(prefs.mChannels);
@@ -513,30 +499,18 @@ MediaEngineWebRTCMicrophoneSource::Start
// Make sure logger starts before capture
AsyncLatencyLogger::Get(true);
if (mAudioOutputObserver) {
mAudioOutputObserver->Clear();
}
- if (mVoEBase->StartReceive(mChannel)) {
- return NS_ERROR_FAILURE;
- }
-
- // Must be *before* StartSend() so it will notice we selected external input (full_duplex)
mAudioInput->StartRecording(aStream, mListener);
- if (mVoEBase->StartSend(mChannel)) {
- return NS_ERROR_FAILURE;
- }
-
- // Attach external media processor, so this::Process will be called.
- mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);
-
return NS_OK;
}
nsresult
MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
{
AssertIsOnOwningThread();
{
@@ -555,38 +529,27 @@ MediaEngineWebRTCMicrophoneSource::Stop(
if (!mSources.IsEmpty()) {
mAudioInput->StopRecording(aSource);
return NS_OK;
}
if (mState != kStarted) {
return NS_ERROR_FAILURE;
}
- if (!mVoEBase) {
- return NS_ERROR_FAILURE;
- }
mState = kStopped;
}
if (mListener) {
// breaks a cycle, since the WebRTCAudioDataListener has a RefPtr to us
mListener->Shutdown();
mListener = nullptr;
}
mAudioInput->StopRecording(aSource);
- mVoERender->DeRegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel);
-
- if (mVoEBase->StopSend(mChannel)) {
- return NS_ERROR_FAILURE;
- }
- if (mVoEBase->StopReceive(mChannel)) {
- return NS_ERROR_FAILURE;
- }
return NS_OK;
}
void
MediaEngineWebRTCMicrophoneSource::NotifyPull(MediaStreamGraph *aGraph,
SourceMediaStream *aSource,
TrackID aID,
StreamTime aDesiredTime,
@@ -770,140 +733,37 @@ void
MediaEngineWebRTCMicrophoneSource::DeviceChanged() {
// Reset some processing
bool enabled;
ResetProcessingIfNeeded(Agc);
ResetProcessingIfNeeded(Ec);
ResetProcessingIfNeeded(Ns);
}
-bool
-MediaEngineWebRTCMicrophoneSource::InitEngine()
-{
- MOZ_ASSERT(!mVoEBase);
- mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
-
- mVoEBase->Init();
- webrtc::Config config;
- config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(mExtendedFilter));
- config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(mDelayAgnostic));
- mVoEBase->audio_processing()->SetExtraOptions(config);
-
- mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
- if (mVoERender) {
- mVoENetwork = webrtc::VoENetwork::GetInterface(mVoiceEngine);
- if (mVoENetwork) {
- mVoEProcessing = webrtc::VoEAudioProcessing::GetInterface(mVoiceEngine);
- if (mVoEProcessing) {
- mNullTransport = new NullTransport();
- return true;
- }
- }
- }
- DeInitEngine();
- return false;
-}
-
-// This shuts down the engine when no channel is open
-void
-MediaEngineWebRTCMicrophoneSource::DeInitEngine()
-{
- if (mVoEBase) {
- mVoEBase->Terminate();
- delete mNullTransport;
- mNullTransport = nullptr;
-
- mVoEProcessing = nullptr;
- mVoENetwork = nullptr;
- mVoERender = nullptr;
- mVoEBase = nullptr;
- }
-}
-
-// This shuts down the engine when no channel is open.
// mState records if a channel is allocated (slightly redundantly to mChannel)
void
MediaEngineWebRTCMicrophoneSource::FreeChannel()
{
if (mState != kReleased) {
- if (mChannel != -1) {
- MOZ_ASSERT(mVoENetwork && mVoEBase);
- if (mVoENetwork) {
- mVoENetwork->DeRegisterExternalTransport(mChannel);
- }
- if (mVoEBase) {
- mVoEBase->DeleteChannel(mChannel);
- }
- mChannel = -1;
- }
mState = kReleased;
MOZ_ASSERT(sChannelsOpen > 0);
- if (--sChannelsOpen == 0) {
- DeInitEngine();
- }
+ --sChannelsOpen;
}
}
bool
MediaEngineWebRTCMicrophoneSource::AllocChannel()
{
- MOZ_ASSERT(mVoEBase);
-
- mChannel = mVoEBase->CreateChannel();
- if (mChannel >= 0) {
- if (!mVoENetwork->RegisterExternalTransport(mChannel, *mNullTransport)) {
- mSampleFrequency = MediaEngine::DEFAULT_SAMPLE_RATE;
- LOG(("%s: sampling rate %u", __FUNCTION__, mSampleFrequency));
-
- // Check for availability.
- if (!mAudioInput->SetRecordingDevice(mCapIndex)) {
- // Because of the permission mechanism of B2G, we need to skip the status
- // check here.
- bool avail = false;
- mAudioInput->GetRecordingDeviceStatus(avail);
- if (!avail) {
- if (sChannelsOpen == 0) {
- DeInitEngine();
- }
- return false;
- }
+ mSampleFrequency = MediaEngine::USE_GRAPH_RATE;
+ LOG(("%s: sampling rate %u", __FUNCTION__, mSampleFrequency));
- // Set "codec" to PCM, 32kHz on device's channels
- ScopedCustomReleasePtr<webrtc::VoECodec> ptrVoECodec(webrtc::VoECodec::GetInterface(mVoiceEngine));
- if (ptrVoECodec) {
- webrtc::CodecInst codec;
- strcpy(codec.plname, ENCODING);
- codec.channels = CHANNELS;
- uint32_t maxChannels = 0;
- if (mAudioInput->GetMaxAvailableChannels(maxChannels) == 0) {
- MOZ_ASSERT(maxChannels);
- codec.channels = std::min<uint32_t>(maxChannels, MAX_CHANNELS);
- }
- MOZ_ASSERT(mSampleFrequency == 16000 || mSampleFrequency == 32000);
- codec.rate = SAMPLE_RATE(mSampleFrequency);
- codec.plfreq = mSampleFrequency;
- codec.pacsize = SAMPLE_LENGTH(mSampleFrequency);
- codec.pltype = 0; // Default payload type
-
- if (!ptrVoECodec->SetSendCodec(mChannel, codec)) {
- mState = kAllocated;
- sChannelsOpen++;
- return true;
- }
- }
- }
- }
- }
- mVoEBase->DeleteChannel(mChannel);
- mChannel = -1;
- if (sChannelsOpen == 0) {
- DeInitEngine();
- }
- return false;
+ mState = kAllocated;
+ sChannelsOpen++;
+ return true;
}
void
MediaEngineWebRTCMicrophoneSource::Shutdown()
{
Super::Shutdown();
if (mListener) {
// breaks a cycle, since the WebRTCAudioDataListener has a RefPtr to us
@@ -935,54 +795,16 @@ MediaEngineWebRTCMicrophoneSource::Shutd
// on last Deallocate(), FreeChannel()s and DeInit()s if all channels are released
Deallocate(mRegisteredHandles[0].get());
}
MOZ_ASSERT(mState == kReleased);
mAudioInput = nullptr;
}
-typedef int16_t sample;
-
-void
-MediaEngineWebRTCMicrophoneSource::Process(int channel,
- webrtc::ProcessingTypes type,
- sample *audio10ms, size_t length,
- int samplingFreq, bool isStereo)
-{
- MOZ_ASSERT(!PassThrough(), "This should be bypassed when in PassThrough mode.");
- // On initial capture, throw away all far-end data except the most recent sample
- // since it's already irrelevant and we want to keep avoid confusing the AEC far-end
- // input code with "old" audio.
- if (!mStarted) {
- mStarted = true;
- while (mAudioOutputObserver->Size() > 1) {
- free(mAudioOutputObserver->Pop()); // only call if size() > 0
- }
- }
-
- while (mAudioOutputObserver->Size() > 0) {
- FarEndAudioChunk *buffer = mAudioOutputObserver->Pop(); // only call if size() > 0
- if (buffer) {
- int length = buffer->mSamples;
- int res = mVoERender->ExternalPlayoutData(buffer->mData,
- mAudioOutputObserver->PlayoutFrequency(),
- mAudioOutputObserver->PlayoutChannels(),
- length);
- free(buffer);
- if (res == -1) {
- return;
- }
- }
- }
-
- uint32_t channels = isStereo ? 2 : 1;
- InsertInGraph<int16_t>(audio10ms, length, channels);
-}
-
void
MediaEngineWebRTCAudioCaptureSource::GetName(nsAString &aName) const
{
aName.AssignLiteral("AudioCapture");
}
void
MediaEngineWebRTCAudioCaptureSource::GetUUID(nsACString &aUUID) const