--- a/dom/bindings/moz.build
+++ b/dom/bindings/moz.build
@@ -87,16 +87,17 @@ LOCAL_INCLUDES += [
'/js/xpconnect/wrappers',
'/layout/generic',
'/layout/style',
'/layout/xul/tree',
'/media/mtransport',
'/media/webrtc/',
'/media/webrtc/signaling/src/common/time_profiling',
'/media/webrtc/signaling/src/peerconnection',
+ '/media/webrtc/trunk/',
]
UNIFIED_SOURCES += [
'BindingUtils.cpp',
'CallbackInterface.cpp',
'CallbackObject.cpp',
'Date.cpp',
'DOMJSProxyHandler.cpp',
--- a/dom/media/bridge/moz.build
+++ b/dom/media/bridge/moz.build
@@ -18,14 +18,15 @@ LOCAL_INCLUDES += [
'/ipc/chromium/src',
'/media/mtransport',
'/media/webrtc/',
'/media/webrtc/signaling/src/common/time_profiling',
'/media/webrtc/signaling/src/media-conduit',
'/media/webrtc/signaling/src/mediapipeline',
'/media/webrtc/signaling/src/peerconnection',
+ '/media/webrtc/trunk/',
]
FINAL_LIBRARY = 'xul'
if CONFIG['GNU_CXX']:
CXXFLAGS += ['-Wno-error=shadow']
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -276,16 +276,23 @@ bool WebrtcAudioConduit::InsertDTMFTone(
int result = 0;
if (outOfBand){
result = mChannelProxy->SendTelephoneEventOutband(eventCode, lengthMs);
}
return result != -1;
}
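+// Invoked by the voice engine channel (via RtpAudioLevelObserver) whenever a
+// received packet carries csrc-audio-level data; forwards it to the shared
+// AudioLevelObserver.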
+void
+WebrtcAudioConduit::OnRtpCsrcAudioLevels(const webrtc::WebRtcRTPHeader* aHeader) {
+ if (mAudioLevelObserver) {
+ mAudioLevelObserver->OnRtpCsrcAudioLevels(aHeader);
+ }
+}
+
/*
* WebRTCAudioConduit Implementation
*/
MediaConduitErrorCode WebrtcAudioConduit::Init()
{
CSFLogDebug(logTag, "%s this=%p", __FUNCTION__, this);
#ifdef MOZ_WIDGET_ANDROID
@@ -363,16 +370,17 @@ MediaConduitErrorCode WebrtcAudioConduit
{
CSFLogError(logTag, "%s VoiceEngine Channel creation failed",__FUNCTION__);
return kMediaConduitChannelError;
}
// Needed to access TelephoneEvent APIs in 57 if we're not using Call/audio_send_stream/etc
webrtc::VoiceEngineImpl* s = static_cast<webrtc::VoiceEngineImpl*>(mVoiceEngine);
mChannelProxy = s->GetChannelProxy(mChannel);
MOZ_ASSERT(mChannelProxy);
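+ // Receive CSRC audio level callbacks from this channel on the conduit itself.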
+ mChannelProxy->SetRtpAudioLevelObserver(this);
CSFLogDebug(logTag, "%s Channel Created %d ",__FUNCTION__, mChannel);
if(mPtrVoENetwork->RegisterExternalTransport(mChannel, *this) == -1)
{
CSFLogError(logTag, "%s VoiceEngine, External Transport Failed",__FUNCTION__);
return kMediaConduitTransportRegistrationFail;
}
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -8,16 +8,17 @@
#include "mozilla/Attributes.h"
#include "mozilla/ReentrantMonitor.h"
#include "mozilla/TimeStamp.h"
#include "nsTArray.h"
#include "MediaConduitInterface.h"
#include "MediaEngineWrapper.h"
+#include "AudioLevelObserver.h"
// Audio Engine Includes
#include "webrtc/common_types.h"
#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_volume_control.h"
#include "webrtc/voice_engine/include/voe_codec.h"
#include "webrtc/voice_engine/include/voe_file.h"
#include "webrtc/voice_engine/include/voe_network.h"
@@ -44,17 +45,18 @@ namespace mozilla {
DOMHighResTimeStamp
NTPtoDOMHighResTimeStamp(uint32_t ntpHigh, uint32_t ntpLow);
/**
* Concrete class for Audio session. Hooks up
* - media-source and target to external transport
*/
class WebrtcAudioConduit: public AudioSessionConduit
- , public webrtc::Transport
+ , public webrtc::Transport
+ , public webrtc::RtpAudioLevelObserver
{
public:
//VoiceEngine defined constant for Payload Name Size.
static const unsigned int CODEC_PLNAME_SIZE;
/**
* APIs used by the registered external transport to this Conduit to
* feed in received RTP Frames to the VoiceEngine for decoding
@@ -177,17 +179,18 @@ public:
mEngineTransmitting(false),
mEngineReceiving(false),
mChannel(-1),
mDtmfEnabled(false),
mCodecMutex("AudioConduit codec db"),
mCaptureDelay(150),
mLastTimestamp(0),
mSamples(0),
- mLastSyncLog(0)
+ mLastSyncLog(0),
+ mAudioLevelObserver(new AudioLevelObserver())
{
}
virtual ~WebrtcAudioConduit();
MediaConduitErrorCode Init();
int GetChannel() { return mChannel; }
@@ -245,16 +248,20 @@ public:
unsigned int* packetsSent,
uint64_t* bytesSent) override;
bool SetDtmfPayloadType(unsigned char type, int freq) override;
bool InsertDTMFTone(int channel, int eventCode, bool outOfBand,
int lengthMs, int attenuationDb) override;
+ virtual RefPtr<AudioLevelObserver>
+ GetAudioLevelObserver() override { return mAudioLevelObserver; }
+
+ void OnRtpCsrcAudioLevels(const webrtc::WebRtcRTPHeader* aRtpHeader) override;
private:
WebrtcAudioConduit(const WebrtcAudioConduit& other) = delete;
void operator=(const WebrtcAudioConduit& other) = delete;
//Local database of currently applied receive codecs
typedef std::vector<AudioCodecConfig* > RecvCodecList;
//Function to convert between WebRTC and Conduit codec structures
@@ -316,13 +323,15 @@ private:
// Current "capture" delay (really output plus input delay)
int32_t mCaptureDelay;
uint32_t mLastTimestamp;
uint32_t mSamples;
uint32_t mLastSyncLog;
+
+ RefPtr<AudioLevelObserver> mAudioLevelObserver;
};
} // end namespace
#endif
new file mode 100644
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/AudioLevelObserver.cpp
@@ -0,0 +1,62 @@
+#include "AudioLevelObserver.h"
+#include "nsThreadUtils.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "mozilla/dom/RTCRtpContributingSourceBinding.h"
+
+namespace mozilla {
+
+AudioLevelObserver::AudioLevelObserver() :
+ mLevelGuard("AudioLevelObserver::mLevelGuard") {}
+
+void
+AudioLevelObserver::OnRtpCsrcAudioLevels(const webrtc::WebRtcRTPHeader* aHeader)
+{
+ const int64_t timestamp = aHeader->ntp_time_ms;
+ auto& header = aHeader->header;
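+ // Skip packets until a plausible NTP timestamp is available; ntp_time_ms
+ // can be zero (or otherwise meaningless) before the RTP-to-NTP estimate
+ // has been established.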
+ if (timestamp < 10000) {
+ return;
+ }
+ auto& list = header.extension.csrcAudioLevels;
+ {
+ MutexAutoLock lock(mLevelGuard);
+ const uint8_t num = std::min(header.numCSRCs, list.numAudioLevels);
+ if (num != header.numCSRCs) {
+ return;
+ }
+ for (uint8_t i = 0; i < num; i++) {
+ const uint32_t& csrc = header.arrOfCSRCs[i];
+ mCsrcAudioLevels[csrc].Update(csrc,
+ timestamp,
+ list.arrOfAudioLevels[i]);
+ }
+ const auto expiry = timestamp - (10 * 1000);
+ // Prune entries that have fallen outside the 10 second window. At most
+ // |mCsrcAudioLevels.size() - num| entries can be stale, since the |num|
+ // entries just updated carry the current timestamp.
+ size_t overage = mCsrcAudioLevels.size() - num;
+ auto it = mCsrcAudioLevels.begin();
+ while (overage && it != mCsrcAudioLevels.end()) {
+ if (it->second.timestamp < expiry) {
+ it = mCsrcAudioLevels.erase(it);
+ overage--;
+ continue;
+ }
+ it++;
+ }
+ }
+}
+
+void
+AudioLevelObserver::GetCsrcAudioLevels(
+ nsTArray<dom::RTCRtpContributingSourceEntry>& outLevels) const
+{
+ outLevels.Clear();
+ MutexAutoLock lock(mLevelGuard);
+ for (const auto& it : mCsrcAudioLevels) {
+ dom::RTCRtpContributingSourceEntry domEntry;
+ domEntry.mSource.Construct(it.second.source);
+ domEntry.mTimestamp.Construct(it.second.timestamp);
+ domEntry.mAudioLevel.Construct(it.second.audioLevel);
+ outLevels.AppendElement(domEntry);
+ }
+}
+
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/AudioLevelObserver.h
@@ -0,0 +1,66 @@
+#ifndef AUDIOLEVELOBSERVER_H
+#define AUDIOLEVELOBSERVER_H
+
+#include <vector>
+#include <map>
+
+#include "mozilla/Mutex.h"
+#include "nsISupportsImpl.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_audio_level_observer.h"
+
+namespace mozilla {
+
+namespace dom {
+ struct RTCRtpContributingSourceEntry;
+}
+/* Observes changes in audio levels reported by the
+ * csrc-audio-level RTP header extension.
+ */
+class AudioLevelObserver: public webrtc::RtpAudioLevelObserver {
+public:
+
+ AudioLevelObserver();
+
+ virtual void
+ OnRtpCsrcAudioLevels(const webrtc::WebRtcRTPHeader* aRtpHeader) override;
+
+ // Expected to be held in a RefPtr<>
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioLevelObserver);
+
+ struct AudioLevelEntry {
+ void Update(const uint32_t aSource,
+ const int64_t aTimestamp,
+ const uint8_t aAudioLevel) {
+ source = aSource;
+ timestamp = aTimestamp;
+ audioLevel = aAudioLevel;
+ }
+ // Either the CSRC or SSRC
+ uint32_t source;
+ // NTP timestamp (ms) of the last packet that updated this entry
+ int64_t timestamp;
+ uint8_t audioLevel;
+ };
+
+ /*
+ * Get the most recent 10 second window of CSRC audio levels.
+ * @param outLevels will be populated with audio level entries
+ * NOTE: the 10 second window is relative to the last time levels were
+ * updated, not the current time.
+ */
+ void GetCsrcAudioLevels(
+ nsTArray<dom::RTCRtpContributingSourceEntry>& outLevels) const;
+
+private:
+ virtual ~AudioLevelObserver() {};
+ // Do not copy or assign
+ AudioLevelObserver(const AudioLevelObserver&) = delete;
+ AudioLevelObserver& operator=(AudioLevelObserver const&) = delete;
+
+ // Map CSRC to AudioLevelEntry
+ std::map<uint32_t, AudioLevelEntry> mCsrcAudioLevels;
+ // Guards access to mCsrcAudioLevels
+ mutable Mutex mLevelGuard;
+};
+} // namespace mozilla
+#endif // AUDIOLEVELOBSERVER_H
--- a/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
@@ -7,16 +7,17 @@
#include "nsISupportsImpl.h"
#include "nsXPCOM.h"
#include "nsDOMNavigationTiming.h"
#include "mozilla/RefPtr.h"
#include "mozilla/RefCounted.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/utils.h"
+#include "AudioLevelObserver.h"
#include "CodecConfig.h"
#include "VideoTypes.h"
#include "MediaConduitErrors.h"
#include "ImageContainer.h"
#include "webrtc/call.h"
#include "webrtc/config.h"
@@ -517,12 +518,17 @@ public:
*/
virtual MediaConduitErrorCode EnableAudioLevelExtension(bool enabled, uint8_t id) = 0;
virtual MediaConduitErrorCode EnableMIDExtension(bool enabled, uint8_t id) = 0;
virtual bool SetDtmfPayloadType(unsigned char type, int freq) = 0;
virtual bool InsertDTMFTone(int channel, int eventCode, bool outOfBand,
int lengthMs, int attenuationDb) = 0;
+ /*
+ * Returns the AudioLevelObserver that tracks audio levels reported by the
+ * csrc-audio-level RTP header extension on received packets.
+ */
+ virtual RefPtr<AudioLevelObserver> GetAudioLevelObserver() = 0;
};
}
#endif
--- a/media/webrtc/signaling/src/media-conduit/moz.build
+++ b/media/webrtc/signaling/src/media-conduit/moz.build
@@ -15,16 +15,17 @@ LOCAL_INCLUDES += [
'/media/webrtc/signaling/src/common/browser_logging',
'/media/webrtc/signaling/src/common/time_profiling',
'/media/webrtc/signaling/src/peerconnection',
'/media/webrtc/trunk',
]
SOURCES += [
'AudioConduit.cpp',
+ 'AudioLevelObserver.cpp',
'VideoConduit.cpp',
]
UNIFIED_SOURCES += [
'GmpVideoCodec.cpp',
'MediaDataDecoderCodec.cpp',
'WebrtcGmpVideoCodec.cpp',
'WebrtcMediaDataDecoderCodec.cpp',
--- a/media/webrtc/signaling/src/peerconnection/MediaPipelineFactory.cpp
+++ b/media/webrtc/signaling/src/peerconnection/MediaPipelineFactory.cpp
@@ -681,18 +681,20 @@ MediaPipelineFactory::GetOrCreateAudioCo
mPCMedia->GetAudioConduit(aTrackPair.mLevel);
if (!conduit) {
conduit = AudioSessionConduit::Create();
if (!conduit) {
MOZ_MTLOG(ML_ERROR, "Could not create audio conduit");
return NS_ERROR_FAILURE;
}
-
mPCMedia->AddAudioConduit(aTrackPair.mLevel, conduit);
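+ // Only receiving conduits observe CSRC audio levels; hand the observer to
+ // the PeerConnection so it can be queried per receive track.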
+ if (receiving) {
+ mPC->RegisterAudioLevelObserver(aTrack, conduit->GetAudioLevelObserver());
+ }
}
PtrVector<AudioCodecConfig> configs;
nsresult rv = NegotiatedDetailsToAudioCodecConfigs(
*aTrack.GetNegotiatedDetails(), &configs);
if (NS_FAILED(rv)) {
MOZ_MTLOG(ML_ERROR, "Failed to convert JsepCodecDescriptions to "
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
@@ -77,16 +77,17 @@
#include "nsIURLParser.h"
#include "nsIDOMDataChannel.h"
#include "NullPrincipal.h"
#include "mozilla/PeerIdentity.h"
#include "mozilla/dom/RTCCertificate.h"
#include "mozilla/dom/RTCConfigurationBinding.h"
#include "mozilla/dom/RTCDTMFSenderBinding.h"
#include "mozilla/dom/RTCDTMFToneChangeEvent.h"
+#include "mozilla/dom/RTCRtpContributingSourceBinding.h"
#include "mozilla/dom/RTCRtpSenderBinding.h"
#include "mozilla/dom/RTCStatsReportBinding.h"
#include "mozilla/dom/RTCPeerConnectionBinding.h"
#include "mozilla/dom/PeerConnectionImplBinding.h"
#include "mozilla/dom/DataChannelBinding.h"
#include "mozilla/dom/PerformanceTiming.h"
#include "mozilla/dom/PluginCrashedEvent.h"
#include "MediaStreamList.h"
@@ -312,16 +313,17 @@ PeerConnectionImpl::PeerConnectionImpl(c
, mUuidGen(MakeUnique<PCUuidGenerator>())
, mHaveConfiguredCodecs(false)
, mHaveDataStream(false)
, mAddCandidateErrorCount(0)
, mTrickle(true) // TODO(ekr@rtfm.com): Use pref
, mNegotiationNeeded(false)
, mPrivateWindow(false)
, mActiveOnWindow(false)
+ , mAudioLevelGuard("PeerConnectionImpl::AudioLevels")
, mPacketDumpEnabled(false)
, mPacketDumpFlagsMutex("Packet dump flags mutex")
{
MOZ_ASSERT(NS_IsMainThread());
auto log = RLogConnector::CreateInstance();
if (aGlobal) {
mWindow = do_QueryInterface(aGlobal->GetAsSupports());
if (IsPrivateBrowsing(mWindow)) {
@@ -2651,16 +2653,17 @@ PeerConnectionImpl::InsertDTMF(mozilla::
if (!state->mTones.IsEmpty()) {
state->mSendTimer->InitWithNamedFuncCallback(DTMFSendTimerCallback_m, state, 0,
nsITimer::TYPE_ONE_SHOT,
"DTMFSendTimerCallback_m");
}
return NS_OK;
}
+
NS_IMETHODIMP
PeerConnectionImpl::GetDTMFToneBuffer(mozilla::dom::RTCRtpSender& sender,
nsAString& outToneBuffer) {
PC_AUTO_ENTER_API_CALL(false);
JSErrorResult jrv;
// Retrieve track
@@ -2679,16 +2682,53 @@ PeerConnectionImpl::GetDTMFToneBuffer(mo
outToneBuffer = dtmfState.mTones;
break;
}
}
return NS_OK;
}
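+// Remembers the AudioLevelObserver for a receive track so that
+// GetContributingSources() can later look it up by track id.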
+void
+PeerConnectionImpl::RegisterAudioLevelObserver(
+ const JsepTrack& aTrack,
+ RefPtr<AudioLevelObserver> aObserver)
+{
+ {
+ MutexAutoLock lock(mAudioLevelGuard);
+ mAudioLevelObservers[aTrack.GetTrackId()] = std::move(aObserver);
+ }
+}
+
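+// Looks up the AudioLevelObserver registered for |aTrack| and returns the
+// CSRC audio levels it has observed within its most recent 10 second window.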
+NS_IMETHODIMP
+PeerConnectionImpl::GetContributingSources(
+ MediaStreamTrack& aTrack,
+ nsTArray<dom::RTCRtpContributingSourceEntry>&
+ outContributingSources)
+{
+ PC_AUTO_ENTER_API_CALL(false);
+
+ std::string trackId = PeerConnectionImpl::GetTrackId(aTrack);
+ outContributingSources.Clear();
+ {
+ RefPtr<AudioLevelObserver> observer;
+ {
+ MutexAutoLock lock(mAudioLevelGuard);
+ const auto& it = mAudioLevelObservers.find(trackId);
+ if (it == mAudioLevelObservers.end()) {
+ NS_WARNING("No audio level observer registered for track!");
+ return NS_OK;
+ }
+ observer = it->second;
+ }
+ observer->GetCsrcAudioLevels(outContributingSources);
+ }
+ return NS_OK;
+}
+
NS_IMETHODIMP
PeerConnectionImpl::ReplaceTrack(MediaStreamTrack& aThisTrack,
MediaStreamTrack& aWithTrack) {
PC_AUTO_ENTER_API_CALL(true);
nsString trackId;
aThisTrack.GetId(trackId);
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.h
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.h
@@ -24,17 +24,17 @@
#include "nsPIDOMWindow.h"
#include "nsIUUIDGenerator.h"
#include "nsIThread.h"
#include "mozilla/Mutex.h"
#include "signaling/src/jsep/JsepSession.h"
#include "signaling/src/jsep/JsepSessionImpl.h"
#include "signaling/src/sdp/SdpMediaSection.h"
-
+#include "signaling/src/media-conduit/AudioLevelObserver.h"
#include "mozilla/ErrorResult.h"
#include "mozilla/dom/PeerConnectionImplEnumsBinding.h"
#include "mozilla/dom/RTCPeerConnectionBinding.h" // mozPacketDumpType, maybe move?
#include "PrincipalChangeObserver.h"
#include "StreamTracks.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/net/DataChannel.h"
@@ -62,16 +62,17 @@ class NrIceStunServer;
class NrIceTurnServer;
class MediaPipeline;
class DOMMediaStream;
namespace dom {
class RTCCertificate;
struct RTCConfiguration;
+struct RTCRtpContributingSourceEntry;
class RTCDTMFSender;
struct RTCIceServer;
struct RTCOfferOptions;
struct RTCRtpParameters;
class RTCRtpSender;
class MediaStreamTrack;
#ifdef USE_FAKE_PCOBSERVER
@@ -418,16 +419,23 @@ public:
}
NS_IMETHODIMP_TO_ERRORRESULT(GetDTMFToneBuffer, ErrorResult &rv,
dom::RTCRtpSender& sender,
nsAString& outToneBuffer) {
rv = GetDTMFToneBuffer(sender, outToneBuffer);
}
+ NS_IMETHODIMP_TO_ERRORRESULT(GetContributingSources, ErrorResult &rv,
+ dom::MediaStreamTrack& aMediaStreamTrack,
+ nsTArray<dom::RTCRtpContributingSourceEntry>&
+ outContributingSources) {
+ rv = GetContributingSources(aMediaStreamTrack, outContributingSources);
+ }
+
NS_IMETHODIMP_TO_ERRORRESULT(ReplaceTrack, ErrorResult &rv,
mozilla::dom::MediaStreamTrack& aThisTrack,
mozilla::dom::MediaStreamTrack& aWithTrack)
{
rv = ReplaceTrack(aThisTrack, aWithTrack);
}
NS_IMETHODIMP_TO_ERRORRESULT(SetParameters, ErrorResult &rv,
@@ -641,16 +649,19 @@ public:
// PeerConnectionMedia can't do it because it doesn't know about principals
virtual void PrincipalChanged(dom::MediaStreamTrack* aTrack) override;
static std::string GetStreamId(const DOMMediaStream& aStream);
static std::string GetTrackId(const dom::MediaStreamTrack& track);
void OnMediaError(const std::string& aError);
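+ // Associates a receive track with its conduit's AudioLevelObserver so that
+ // GetContributingSources() can report CSRC audio levels for it.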
+ void RegisterAudioLevelObserver(const JsepTrack& aTrack,
+ RefPtr<AudioLevelObserver> aObserver);
+
bool ShouldDumpPacket(size_t level, dom::mozPacketDumpType type,
bool sending) const;
void DumpPacket_m(size_t level, dom::mozPacketDumpType type, bool sending,
UniquePtr<uint8_t[]>& packet, size_t size);
private:
virtual ~PeerConnectionImpl();
@@ -832,16 +843,20 @@ private:
uint32_t mDuration;
uint32_t mInterToneGap;
};
static void
DTMFSendTimerCallback_m(nsITimer* timer, void*);
nsTArray<DTMFState> mDTMFStates;
+ // Guards the map of audio level observers
+ Mutex mAudioLevelGuard;
+ // Map of track id to the AudioLevelObserver for that receive track
+ std::map<std::string, RefPtr<AudioLevelObserver>> mAudioLevelObservers;
std::vector<unsigned> mSendPacketDumpFlags;
std::vector<unsigned> mRecvPacketDumpFlags;
Atomic<bool> mPacketDumpEnabled;
mutable Mutex mPacketDumpFlagsMutex;
public:
//these are temporary until the DataChannel Listen/Connect API is removed
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/BUILD.gn
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/BUILD.gn
@@ -9,16 +9,17 @@
import("../../build/webrtc.gni")
rtc_static_library("rtp_rtcp") {
sources = [
"include/flexfec_receiver.h",
"include/flexfec_sender.h",
"include/receive_statistics.h",
"include/remote_ntp_time_estimator.h",
+ "include/rtp_audio_level_observer.h",
"include/rtp_header_parser.h",
"include/rtp_payload_registry.h",
"include/rtp_receiver.h",
"include/rtp_rtcp.h",
"include/rtp_rtcp_defines.h",
"include/ulpfec_receiver.h",
"source/byte_io.h",
"source/dtmf_queue.cc",
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/include/rtp_audio_level_observer.h
@@ -0,0 +1,15 @@
+#ifndef RTP_AUDIO_LEVEL_OBSERVER_H
+#define RTP_AUDIO_LEVEL_OBSERVER_H
+
+namespace webrtc {
+
+struct WebRtcRTPHeader;
+
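+// Interface for consumers that want to be notified when a received RTP packet
+// carries csrc-audio-level header extension data.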
+class RtpAudioLevelObserver {
+ public:
+ virtual void OnRtpCsrcAudioLevels(const WebRtcRTPHeader* aRtpHeader) = 0;
+
+ protected:
+ // Non-public to prevent deletion through the interface pointer.
+ virtual ~RtpAudioLevelObserver() = default;
+};
+
+}
+
+#endif // RTP_AUDIO_LEVEL_OBSERVER_H
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi
@@ -18,16 +18,17 @@
],
'sources': [
# Common
'include/fec_receiver.h',
'include/flexfec_receiver.h',
'include/flexfec_sender.h',
'include/receive_statistics.h',
'include/remote_ntp_time_estimator.h',
+ 'include/rtp_audio_level_observer.h',
'include/rtp_header_parser.h',
'include/rtp_payload_registry.h',
'include/rtp_receiver.h',
'include/rtp_rtcp.h',
'include/rtp_rtcp_defines.h',
'source/byte_io.h',
'source/fec_private_tables_bursty.h',
'source/fec_private_tables_random.h',
--- a/media/webrtc/trunk/webrtc/voice_engine/channel.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/channel.cc
@@ -24,16 +24,17 @@
#include "webrtc/base/timeutils.h"
#include "webrtc/config.h"
#include "webrtc/logging/rtc_event_log/rtc_event_log.h"
#include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/pacing/packet_router.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_audio_level_observer.h"
#include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_receiver.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
#include "webrtc/modules/utility/include/process_thread.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/include/voe_external_media.h"
#include "webrtc/voice_engine/include/voe_rtp_rtcp.h"
@@ -591,16 +592,20 @@ bool Channel::GetRTCPReceiverStatistics(
if (sentPackets) {
uint64_t sentBytes = rtpCounters.MediaPayloadBytes();
*bytesReceived = sentBytes * (*packetsReceived) / sentPackets;
}
}
return true;
}
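+// Registers the observer that OnReceivedPayloadData() notifies when incoming
+// packets carry CSRC audio levels.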
+void Channel::SetRtpAudioLevelObserver(RtpAudioLevelObserver* observer) {
+ audio_level_observer_ = observer;
+}
+
int32_t Channel::SendData(FrameType frameType,
uint8_t payloadType,
uint32_t timeStamp,
const uint8_t* payloadData,
size_t payloadSize,
const RTPFragmentationHeader* fragmentation) {
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
"Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u,"
@@ -751,16 +756,20 @@ int32_t Channel::OnInitializeDecoder(
}
return 0;
}
int32_t Channel::OnReceivedPayloadData(const uint8_t* payloadData,
size_t payloadSize,
const WebRtcRTPHeader* rtpHeader) {
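+ // Forward any CSRC audio levels carried in the header extension to the
+ // registered observer before normal payload handling.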
+ if (rtpHeader->header.extension.csrcAudioLevels.numAudioLevels
+ && audio_level_observer_) {
+ audio_level_observer_->OnRtpCsrcAudioLevels(rtpHeader);
+ }
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
"Channel::OnReceivedPayloadData(payloadSize=%" PRIuS
","
" payloadType=%u, audioChannel=%" PRIuS ")",
payloadSize, rtpHeader->header.payloadType,
rtpHeader->type.Audio.channel);
if (!channel_state_.Get().playing) {
--- a/media/webrtc/trunk/webrtc/voice_engine/channel.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/channel.h
@@ -45,16 +45,17 @@ namespace webrtc {
class AudioDeviceModule;
class FileWrapper;
class PacketRouter;
class ProcessThread;
class RateLimiter;
class ReceiveStatistics;
class RemoteNtpTimeEstimator;
class RtcEventLog;
+class RtpAudioLevelObserver;
class RTPPayloadRegistry;
class RtpReceiver;
class RTPReceiverAudio;
class RtpRtcp;
class TelephoneEventHandler;
class VoEMediaProcess;
class VoERTPObserver;
class VoiceEngineObserver;
@@ -427,16 +428,18 @@ class Channel
bool GetRTCPReceiverStatistics(int64_t* timestamp,
uint32_t* jitterMs,
uint32_t* cumulativeLost,
uint32_t* packetsReceived,
uint64_t* bytesReceived,
double* packetsFractionLost,
int64_t* rtt) const;
+ virtual void SetRtpAudioLevelObserver(RtpAudioLevelObserver* observer);
+
protected:
void OnIncomingFractionLoss(int fraction_lost);
void OnIncomingReceiverReports(const ReportBlockList& aReportBlocks,
const int64_t aRoundTripTime,
const int64_t aReceptionTime);
private:
bool ReceivePacket(const uint8_t* packet,
@@ -565,14 +568,16 @@ class Channel
PacketRouter* packet_router_ = nullptr;
std::unique_ptr<TransportFeedbackProxy> feedback_observer_proxy_;
std::unique_ptr<TransportSequenceNumberProxy> seq_num_allocator_proxy_;
std::unique_ptr<RtpPacketSenderProxy> rtp_packet_sender_proxy_;
std::unique_ptr<RateLimiter> retransmission_rate_limiter_;
// TODO(ossu): Remove once GetAudioDecoderFactory() is no longer needed.
rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+
+ RtpAudioLevelObserver* audio_level_observer_ = nullptr;
};
} // namespace voe
} // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_CHANNEL_H_
--- a/media/webrtc/trunk/webrtc/voice_engine/channel_proxy.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/channel_proxy.cc
@@ -288,16 +288,20 @@ void ChannelProxy::DisassociateSendChann
RTC_DCHECK(thread_checker_.CalledOnValidThread());
channel()->set_associate_send_channel(ChannelOwner(nullptr));
}
void ChannelProxy::SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
channel()->SetRtcpRttStats(rtcp_rtt_stats);
}
+void ChannelProxy::SetRtpAudioLevelObserver(RtpAudioLevelObserver* observer) {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ channel()->SetRtpAudioLevelObserver(observer);
+}
Channel* ChannelProxy::channel() const {
RTC_DCHECK(channel_owner_.channel());
return channel_owner_.channel();
}
} // namespace voe
} // namespace webrtc
--- a/media/webrtc/trunk/webrtc/voice_engine/channel_proxy.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/channel_proxy.h
@@ -26,16 +26,17 @@ namespace webrtc {
class AudioSinkInterface;
class PacketRouter;
class RtcEventLog;
class RtcpRttStats;
class RtpPacketSender;
class Transport;
class TransportFeedbackObserver;
+class RtpAudioLevelObserver;
namespace voe {
class Channel;
// This class provides the "view" of a voe::Channel that we need to implement
// webrtc::AudioSendStream and webrtc::AudioReceiveStream. It serves two
// purposes:
@@ -106,16 +107,19 @@ class ChannelProxy {
AudioFrame* audio_frame);
virtual int NeededFrequency() const;
virtual void SetTransportOverhead(int transport_overhead_per_packet);
virtual void AssociateSendChannel(const ChannelProxy& send_channel_proxy);
virtual void DisassociateSendChannel();
virtual void SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats);
+ virtual void SetRtpAudioLevelObserver(
+ RtpAudioLevelObserver* observer);
+
private:
Channel* channel() const;
rtc::ThreadChecker thread_checker_;
rtc::RaceChecker race_checker_;
ChannelOwner channel_owner_;
RTC_DISALLOW_COPY_AND_ASSIGN(ChannelProxy);