--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -69,23 +69,16 @@ WebrtcAudioConduit::~WebrtcAudioConduit(
NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
for(auto & codec : mRecvCodecList)
{
delete codec;
}
- // The first one of a pair to be deleted shuts down media for both
- if(mPtrVoEXmedia)
- {
- mPtrVoEXmedia->SetExternalRecordingStatus(false);
- mPtrVoEXmedia->SetExternalPlayoutStatus(false);
- }
-
//Deal with the transport
if(mPtrVoENetwork)
{
mPtrVoENetwork->DeRegisterExternalTransport(mChannel);
}
if(mPtrVoEBase)
{
@@ -352,18 +345,19 @@ MediaConduitErrorCode WebrtcAudioConduit
}
if(!(mPtrVoEBase = VoEBase::GetInterface(mVoiceEngine)))
{
CSFLogError(LOGTAG, "%s Unable to initialize VoEBase", __FUNCTION__);
return kMediaConduitSessionNotInited;
}
- // init the engine with our audio device layer
- if(mPtrVoEBase->Init() == -1)
+ // Init the engine with a fake audio device (we're using cubeb for audio input
+ // and output anyway).
+ if(mPtrVoEBase->Init(mFakeAudioDevice.get()) == -1)
{
CSFLogError(LOGTAG, "%s VoiceEngine Base Not Initialized", __FUNCTION__);
return kMediaConduitSessionNotInited;
}
if(!(mPtrVoENetwork = VoENetwork::GetInterface(mVoiceEngine)))
{
CSFLogError(LOGTAG, "%s Unable to initialize VoENetwork", __FUNCTION__);
@@ -418,31 +412,17 @@ MediaConduitErrorCode WebrtcAudioConduit
CSFLogDebug(LOGTAG, "%s Channel Created %d ",__FUNCTION__, mChannel);
if(mPtrVoENetwork->RegisterExternalTransport(mChannel, *this) == -1)
{
CSFLogError(LOGTAG, "%s VoiceEngine, External Transport Failed",__FUNCTION__);
return kMediaConduitTransportRegistrationFail;
}
- if(mPtrVoEXmedia->SetExternalRecordingStatus(true) == -1)
- {
- CSFLogError(LOGTAG, "%s SetExternalRecordingStatus Failed %d",__FUNCTION__,
- mPtrVoEBase->LastError());
- return kMediaConduitExternalPlayoutError;
- }
-
- if(mPtrVoEXmedia->SetExternalPlayoutStatus(true) == -1)
- {
- CSFLogError(LOGTAG, "%s SetExternalPlayoutStatus Failed %d ",__FUNCTION__,
- mPtrVoEBase->LastError());
- return kMediaConduitExternalRecordingError;
- }
-
- CSFLogDebug(LOGTAG , "%s AudioSessionConduit Initialization Done (%p)",__FUNCTION__, this);
+ CSFLogDebug(LOGTAG, "%s AudioSessionConduit Initialization Done (%p)",__FUNCTION__, this);
return kMediaConduitNoError;
}
// AudioSessionConduit Implementation
MediaConduitErrorCode
WebrtcAudioConduit::SetTransmitterTransport(RefPtr<TransportInterface> aTransport)
{
CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
@@ -712,31 +692,31 @@ WebrtcAudioConduit::SendAudioFrame(const
}
if (MOZ_LOG_TEST(GetLatencyLog(), LogLevel::Debug)) {
struct Processing insert = { TimeStamp::Now(), 0 };
mProcessing.AppendElement(insert);
}
capture_delay = mCaptureDelay;
- //Insert the samples
+ // Insert the samples
mPtrVoEBase->audio_transport()->PushCaptureData(mChannel, audio_data,
sizeof(audio_data[0])*8, // bits
samplingFreqHz,
channels,
lengthSamples);
// we should be good here
return kMediaConduitNoError;
}
MediaConduitErrorCode
WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
- int32_t samplingFreqHz,
- int32_t capture_delay,
- int& lengthSamples)
+ int32_t samplingFreqHz,
+ int32_t capture_delay,
+ int& lengthSamples)
{
CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
unsigned int numSamples = 0;
//validate params
if(!speechData )
{
@@ -767,30 +747,33 @@ WebrtcAudioConduit::GetAudioFrame(int16_
{
CSFLogError(LOGTAG, "%s Engine not Receiving ", __FUNCTION__);
return kMediaConduitSessionNotInited;
}
lengthSamples = 0; //output paramter
- if(mPtrVoEXmedia->ExternalPlayoutGetData( speechData,
- samplingFreqHz,
- capture_delay,
- lengthSamples) == -1)
- {
+ if (mPtrVoEXmedia->GetAudioFrame(mChannel,
+ samplingFreqHz,
+ &mAudioFrame) != 0) {
int error = mPtrVoEBase->LastError();
CSFLogError(LOGTAG, "%s Getting audio data Failed %d", __FUNCTION__, error);
if(error == VE_RUNTIME_PLAY_ERROR)
{
return kMediaConduitPlayoutError;
}
return kMediaConduitUnknownError;
}
+ // XXX Annoying, have to copy to our buffers -- refactor?
+ lengthSamples = mAudioFrame.samples_per_channel_ * mAudioFrame.num_channels_;
+ PodCopy(speechData, mAudioFrame.data_,
+ lengthSamples);
+
// Not #ifdef DEBUG or on a log module so we can use it for about:webrtc/etc
mSamples += lengthSamples;
if (mSamples >= mLastSyncLog + samplingFreqHz) {
int jitter_buffer_delay_ms;
int playout_buffer_delay_ms;
int avsync_offset_ms;
if (GetAVStats(&jitter_buffer_delay_ms,
&playout_buffer_delay_ms,
@@ -959,16 +942,23 @@ WebrtcAudioConduit::StartReceiving()
CSFLogError(LOGTAG , "%s StartReceive Failed %d ",__FUNCTION__, error);
if(error == VE_RECV_SOCKET_ERROR)
{
return kMediaConduitSocketError;
}
return kMediaConduitUnknownError;
}
+ // We can't call GetAudioFrame() unless "external" mixing is enabled.
+ if(mPtrVoEXmedia->SetExternalMixing(mChannel, true) == -1)
+ {
+ CSFLogError(LOGTAG, "%s SetExternalMixing Failed", __FUNCTION__);
+ return kMediaConduitPlayoutError;
+ }
+
if(mPtrVoEBase->StartPlayout(mChannel) == -1)
{
CSFLogError(LOGTAG, "%s Starting playout Failed", __FUNCTION__);
return kMediaConduitPlayoutError;
}
mEngineReceiving = true;
}
--- a/media/webrtc/signaling/src/peerconnection/TransceiverImpl.cpp
+++ b/media/webrtc/signaling/src/peerconnection/TransceiverImpl.cpp
@@ -1005,18 +1005,17 @@ static void StartTrack(MediaStream* aSou
{
class Message : public ControlMessage {
public:
Message(MediaStream* aStream, nsAutoPtr<MediaSegment>&& aSegment)
: ControlMessage(aStream),
segment_(aSegment) {}
void Run() override {
- TrackRate track_rate = segment_->GetType() == MediaSegment::AUDIO ?
- WEBRTC_DEFAULT_SAMPLE_RATE : mStream->GraphRate();
+ TrackRate track_rate = mStream->GraphRate();
StreamTime current_end = mStream->GetTracksEnd();
MOZ_MTLOG(ML_DEBUG, "current_end = " << current_end);
TrackTicks current_ticks =
mStream->TimeToTicksRoundUp(track_rate, current_end);
// Add a track 'now' to avoid possible underrun, especially if we add
// a track "later".
@@ -1028,17 +1027,17 @@ static void StartTrack(MediaStream* aSou
// To avoid assertions, we need to insert a dummy segment that covers up
// to the "start" time for the track
segment_->AppendNullData(current_ticks);
MOZ_MTLOG(ML_DEBUG, "segment_->GetDuration() = " << segment_->GetDuration());
if (segment_->GetType() == MediaSegment::AUDIO) {
MOZ_MTLOG(ML_DEBUG, "Calling AddAudioTrack");
mStream->AsSourceStream()->AddAudioTrack(
kAudioTrack,
- WEBRTC_DEFAULT_SAMPLE_RATE,
+ track_rate,
0,
static_cast<AudioSegment*>(segment_.forget()));
} else {
mStream->AsSourceStream()->AddTrack(kVideoTrack, 0, segment_.forget());
}
mStream->AsSourceStream()->SetPullEnabled(true);
mStream->AsSourceStream()->AdvanceKnownTracksTime(STREAM_TIME_MAX);
--- a/media/webrtc/trunk/webrtc/voice_engine/include/voe_external_media.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/include/voe_external_media.h
@@ -3,37 +3,16 @@
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-// In some cases it is desirable to use an audio source or sink which may
-// not be available to the VoiceEngine, such as a DV camera. This sub-API
-// contains functions that allow for the use of such external recording
-// sources and playout sinks. It also describes how recorded data, or data
-// to be played out, can be modified outside the VoiceEngine.
-//
-// Usage example, omitting error checking:
-//
-// using namespace webrtc;
-// VoiceEngine* voe = VoiceEngine::Create();
-// VoEBase* base = VoEBase::GetInterface(voe);
-// VoEMediaProcess media = VoEMediaProcess::GetInterface(voe);
-// base->Init();
-// ...
-// media->SetExternalRecordingStatus(true);
-// ...
-// base->Terminate();
-// base->Release();
-// media->Release();
-// VoiceEngine::Delete(voe);
-//
#ifndef WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
#define WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
#include "webrtc/common_types.h"
namespace webrtc {
class VoiceEngine;
@@ -80,48 +59,16 @@ class WEBRTC_DLLEXPORT VoEExternalMedia
int channel,
ProcessingTypes type,
VoEMediaProcess& processObject) = 0;
// Removes the VoEMediaProcess derived instance and deactivates external
// media for the specified |channel| and |type|.
virtual int DeRegisterExternalMediaProcessing(int channel,
ProcessingTypes type) = 0;
-
- // Toogles state of external recording.
- virtual int SetExternalRecordingStatus(bool enable) = 0;
-
- // Toogles state of external playout.
- virtual int SetExternalPlayoutStatus(bool enable) = 0;
-
- // This function accepts externally recorded audio. During transmission,
- // this method should be called at as regular an interval as possible
- // with frames of corresponding size.
- virtual int ExternalRecordingInsertData(
- const int16_t speechData10ms[], int lengthSamples,
- int samplingFreqHz, int current_delay_ms) = 0;
-
-
- // This function inserts audio written to the OS audio drivers for use
- // as the far-end signal for AEC processing. The length of the block
- // must be 160, 320, 441 or 480 samples (for 16000, 32000, 44100 or
- // 48000 kHz sampling rates respectively).
- virtual int ExternalPlayoutData(
- int16_t speechData10ms[], int samplingFreqHz, int num_channels,
- int& lengthSamples) = 0;
-
- // This function gets audio for an external playout sink.
- // During transmission, this function should be called every ~10 ms
- // to obtain a new 10 ms frame of audio. The length of the block will
- // be 160, 320, 441 or 480 samples (for 16000, 32000, 44100 or
- // 48000 kHz sampling rates respectively).
- virtual int ExternalPlayoutGetData(
- int16_t speechData10ms[], int samplingFreqHz,
- int current_delay_ms, int& lengthSamples) = 0;
-
// Pulls an audio frame from the specified |channel| for external mixing.
// If the |desired_sample_rate_hz| is 0, the signal will be returned with
// its native frequency, otherwise it will be resampled. Valid frequencies
// are 16000, 22050, 32000, 44100 or 48000 kHz.
virtual int GetAudioFrame(int channel,
int desired_sample_rate_hz,
AudioFrame* frame) = 0;
--- a/media/webrtc/trunk/webrtc/voice_engine/test/auto_test/standard/external_media_test.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/test/auto_test/standard/external_media_test.cc
@@ -24,53 +24,16 @@ class ExternalMediaTest : public AfterSt
TEST_LOG("Back to normal.\n");
EXPECT_EQ(0, voe_xmedia_->DeRegisterExternalMediaProcessing(
channel, type));
Sleep(2000);
}
};
-TEST_F(ExternalMediaTest, ManualCanRecordAndPlaybackUsingExternalPlayout) {
- SwitchToManualMicrophone();
-
- EXPECT_EQ(0, voe_base_->StopSend(channel_));
- EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
- EXPECT_EQ(0, voe_xmedia_->SetExternalPlayoutStatus(true));
- EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
- EXPECT_EQ(0, voe_base_->StartSend(channel_));
-
- TEST_LOG("Recording data for 2 seconds starting now: please speak.\n");
- int16_t recording[32000];
- for (int i = 0; i < 200; i++) {
- int sample_length = 0;
- EXPECT_EQ(0, voe_xmedia_->ExternalPlayoutGetData(
- &(recording[i * 160]), 16000, 100, sample_length));
- EXPECT_EQ(160, sample_length);
- Sleep(10);
- }
-
- EXPECT_EQ(0, voe_base_->StopSend(channel_));
- EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
- EXPECT_EQ(0, voe_xmedia_->SetExternalPlayoutStatus(false));
- EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
- EXPECT_EQ(0, voe_xmedia_->SetExternalRecordingStatus(true));
- EXPECT_EQ(0, voe_base_->StartSend(channel_));
-
- TEST_LOG("Playing back recording, you should hear what you said earlier.\n");
- for (int i = 0; i < 200; i++) {
- EXPECT_EQ(0, voe_xmedia_->ExternalRecordingInsertData(
- &(recording[i * 160]), 160, 16000, 20));
- Sleep(10);
- }
-
- EXPECT_EQ(0, voe_base_->StopSend(channel_));
- EXPECT_EQ(0, voe_xmedia_->SetExternalRecordingStatus(false));
-}
-
TEST_F(ExternalMediaTest,
ManualRegisterExternalMediaProcessingOnAllChannelsAffectsPlayout) {
TEST_LOG("Enabling external media processing: audio should be affected.\n");
TestRegisterExternalMedia(-1, webrtc::kPlaybackAllChannelsMixed);
}
TEST_F(ExternalMediaTest,
ManualRegisterExternalMediaOnSingleChannelAffectsPlayout) {
--- a/media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.cc
@@ -26,19 +26,16 @@ VoEExternalMedia* VoEExternalMedia::GetI
}
VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
s->AddRef();
return s;
}
VoEExternalMediaImpl::VoEExternalMediaImpl(voe::SharedData* shared)
:
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
- playout_delay_ms_(0),
-#endif
shared_(shared) {
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(shared_->instance_id(), -1),
"VoEExternalMediaImpl() - ctor");
}
VoEExternalMediaImpl::~VoEExternalMediaImpl() {
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(shared_->instance_id(), -1),
"~VoEExternalMediaImpl() - dtor");
@@ -111,277 +108,16 @@ int VoEExternalMediaImpl::DeRegisterExte
case kRecordingAllChannelsMixed:
case kRecordingPreprocessing: {
return shared_->transmit_mixer()->DeRegisterExternalMediaProcessing(type);
}
}
return -1;
}
-int VoEExternalMediaImpl::SetExternalRecordingStatus(bool enable)
-{
- WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
- "SetExternalRecordingStatus(enable=%d)", enable);
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
- if (shared_->audio_device() && shared_->audio_device()->Recording())
- {
- shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
- "SetExternalRecordingStatus() cannot set state while sending");
- return -1;
- }
- shared_->set_ext_recording(enable);
- return 0;
-#else
- shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
- "SetExternalRecordingStatus() external recording is not supported");
- return -1;
-#endif
-}
-
-int VoEExternalMediaImpl::ExternalRecordingInsertData(
- const int16_t speechData10ms[],
- int lengthSamples,
- int samplingFreqHz,
- int current_delay_ms)
-{
- WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(shared_->instance_id(), -1),
- "ExternalRecordingInsertData(speechData10ms=0x%x,"
- " lengthSamples=%u, samplingFreqHz=%d, current_delay_ms=%d)",
- &speechData10ms[0], lengthSamples, samplingFreqHz,
- current_delay_ms);
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
- if (!shared_->statistics().Initialized())
- {
- shared_->SetLastError(VE_NOT_INITED, kTraceError);
- return -1;
- }
- if (!shared_->ext_recording())
- {
- shared_->SetLastError(VE_INVALID_OPERATION, kTraceError,
- "ExternalRecordingInsertData() external recording is not enabled");
- return -1;
- }
- if (shared_->NumOfSendingChannels() == 0)
- {
- shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
- "SetExternalRecordingStatus() no channel is sending");
- return -1;
- }
- if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
- (48000 != samplingFreqHz) && (44100 != samplingFreqHz))
- {
- shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
- "SetExternalRecordingStatus() invalid sample rate");
- return -1;
- }
- if ((0 == lengthSamples) ||
- ((lengthSamples % (samplingFreqHz / 100)) != 0))
- {
- shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
- "SetExternalRecordingStatus() invalid buffer size");
- return -1;
- }
- if (current_delay_ms < 0)
- {
- shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
- "SetExternalRecordingStatus() invalid delay)");
- return -1;
- }
-
- uint16_t blockSize = samplingFreqHz / 100;
- // We know the number of samples for 10ms of audio, so we can derive the
- // number of channels here:
- uint32_t channels = lengthSamples * 100 / samplingFreqHz;
- uint32_t nBlocks = lengthSamples / blockSize / channels;
- int16_t totalDelayMS = 0;
- uint16_t playoutDelayMS = 0;
-
- for (uint32_t i = 0; i < nBlocks; i++)
- {
- if (!shared_->ext_playout())
- {
- // Use real playout delay if external playout is not enabled.
- if (shared_->audio_device()->PlayoutDelay(&playoutDelayMS) != 0) {
- shared_->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
- "PlayoutDelay() unable to get the playout delay");
- }
- totalDelayMS = current_delay_ms + playoutDelayMS;
- }
- else
- {
- // Use stored delay value given the last call
- // to ExternalPlayoutGetData.
- totalDelayMS = current_delay_ms + playout_delay_ms_;
- // Compensate for block sizes larger than 10ms
- totalDelayMS -= (int16_t)(i*10);
- if (totalDelayMS < 0)
- totalDelayMS = 0;
- }
- shared_->transmit_mixer()->PrepareDemux(
- (const int8_t*)(&speechData10ms[i*blockSize]),
- blockSize,
- channels,
- samplingFreqHz,
- totalDelayMS,
- 0,
- 0,
- false); // Typing detection not supported
-
- shared_->transmit_mixer()->DemuxAndMix();
- shared_->transmit_mixer()->EncodeAndSend();
- }
- return 0;
-#else
- shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
- "ExternalRecordingInsertData() external recording is not supported");
- return -1;
-#endif
-}
-
-int VoEExternalMediaImpl::SetExternalPlayoutStatus(bool enable)
-{
- WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
- "SetExternalPlayoutStatus(enable=%d)", enable);
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
- if (shared_->audio_device() && shared_->audio_device()->Playing())
- {
- shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
- "SetExternalPlayoutStatus() cannot set state while playing");
- return -1;
- }
- shared_->set_ext_playout(enable);
- return 0;
-#else
- shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
- "SetExternalPlayoutStatus() external playout is not supported");
- return -1;
-#endif
-}
-
-// This inserts a copy of the raw audio sent to the output drivers to use
-// as the "far end" signal for the AEC. Currently only 10ms chunks are
-// supported unfortunately. Since we have to rechunk to 10ms to call this,
-// thre isn't much gained by allowing N*10ms here; external code can loop
-// if needed.
-int VoEExternalMediaImpl::ExternalPlayoutData(
- int16_t speechData10ms[],
- int samplingFreqHz,
- int num_channels,
- int& lengthSamples)
-{
- WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(shared_->instance_id(), -1),
- "ExternalPlayoutData(speechData10ms=0x%x,"
- " lengthSamples=%u, samplingFreqHz=%d)",
- &speechData10ms[0], lengthSamples, samplingFreqHz);
-
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
- if (!shared_->statistics().Initialized())
- {
- shared_->SetLastError(VE_NOT_INITED, kTraceError);
- return -1;
- }
- // FIX(jesup) - check if this is enabled?
- if (shared_->NumOfSendingChannels() == 0)
- {
- shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
- "SetExternalRecordingStatus() no channel is sending");
- return -1;
- }
- if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
- (48000 != samplingFreqHz) && (44100 != samplingFreqHz))
- {
- shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
- "SetExternalRecordingStatus() invalid sample rate");
- return -1;
- }
-
- // Far-end data is inserted without going through neteq/etc.
- // Only supports 10ms chunks; AnalyzeReverseStream() enforces that
- // lower down.
- AudioFrame audioFrame;
- audioFrame.UpdateFrame(-1, 0xFFFFFFFF,
- speechData10ms,
- lengthSamples,
- samplingFreqHz,
- AudioFrame::kNormalSpeech,
- AudioFrame::kVadUnknown,
- num_channels);
-
- shared_->output_mixer()->APMAnalyzeReverseStream(audioFrame);
-#endif
- return 0;
-}
-
-int VoEExternalMediaImpl::ExternalPlayoutGetData(
- int16_t speechData10ms[],
- int samplingFreqHz,
- int current_delay_ms,
- int& lengthSamples)
-{
- WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(shared_->instance_id(), -1),
- "ExternalPlayoutGetData(speechData10ms=0x%x, samplingFreqHz=%d"
- ", current_delay_ms=%d)", &speechData10ms[0], samplingFreqHz,
- current_delay_ms);
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
- if (!shared_->statistics().Initialized())
- {
- shared_->SetLastError(VE_NOT_INITED, kTraceError);
- return -1;
- }
- if (!shared_->ext_playout())
- {
- shared_->SetLastError(VE_INVALID_OPERATION, kTraceError,
- "ExternalPlayoutGetData() external playout is not enabled");
- return -1;
- }
- if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
- (48000 != samplingFreqHz) && (44100 != samplingFreqHz))
- {
- shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
- "ExternalPlayoutGetData() invalid sample rate");
- return -1;
- }
- if (current_delay_ms < 0)
- {
- shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
- "ExternalPlayoutGetData() invalid delay)");
- return -1;
- }
-
- AudioFrame audioFrame;
-
- uint32_t channels = shared_->output_mixer()->GetOutputChannelCount();
- // If we have not received any data yet, consider it's mono since it's the
- // most common case.
- if (channels == 0) {
- channels = 1;
- }
-
- // Retrieve mixed output at the specified rate
- shared_->output_mixer()->MixActiveChannels();
- shared_->output_mixer()->DoOperationsOnCombinedSignal(true);
- shared_->output_mixer()->GetMixedAudio(samplingFreqHz, channels, &audioFrame);
-
- // Deliver audio (PCM) samples to the external sink
- memcpy(speechData10ms,
- audioFrame.data_,
- sizeof(int16_t)*audioFrame.samples_per_channel_*channels);
- lengthSamples = audioFrame.samples_per_channel_ * channels;
-
- // Store current playout delay (to be used by ExternalRecordingInsertData).
- playout_delay_ms_ = current_delay_ms;
-
- return 0;
-#else
- shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
- "ExternalPlayoutGetData() external playout is not supported");
- return -1;
-#endif
-}
-
int VoEExternalMediaImpl::GetAudioFrame(int channel, int desired_sample_rate_hz,
AudioFrame* frame) {
if (!shared_->statistics().Initialized()) {
shared_->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
--- a/media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.h
@@ -21,50 +21,25 @@ class VoEExternalMediaImpl : public VoEE
public:
int RegisterExternalMediaProcessing(int channel,
ProcessingTypes type,
VoEMediaProcess& processObject) override;
int DeRegisterExternalMediaProcessing(int channel,
ProcessingTypes type) override;
- virtual int SetExternalRecordingStatus(bool enable) override;
-
- virtual int SetExternalPlayoutStatus(bool enable) override;
-
- virtual int ExternalRecordingInsertData(
- const int16_t speechData10ms[],
- int lengthSamples,
- int samplingFreqHz,
- int current_delay_ms) override;
-
- // Insertion of far-end data as actually played out to the OS audio driver
- virtual int ExternalPlayoutData(
- int16_t speechData10ms[],
- int samplingFreqHz,
- int num_channels,
- int& lengthSamples) override;
-
- virtual int ExternalPlayoutGetData(int16_t speechData10ms[],
- int samplingFreqHz,
- int current_delay_ms,
- int& lengthSamples) override;
-
int GetAudioFrame(int channel,
int desired_sample_rate_hz,
AudioFrame* frame) override;
int SetExternalMixing(int channel, bool enable) override;
protected:
VoEExternalMediaImpl(voe::SharedData* shared);
~VoEExternalMediaImpl() override;
private:
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
- int playout_delay_ms_;
-#endif
voe::SharedData* shared_;
};
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_IMPL_H