Bug 1353841 - add audioLevel to RTCMediaStreamTrackStats draft
author Nico Grunbaum
Wed, 05 Apr 2017 12:09:56 -0700
changeset 556366 eb0d4adda005f50e4fe420bd703711cb0ce5ee94
parent 555943 1f5e8370f845e5bfe8f5d14a69ad3c848cedfe45
child 622870 6eaf61ab94ba890e94c7a602ad1fc87f913a3a86
push id 52527
push user na-g@nostrum.com
push date Wed, 05 Apr 2017 19:10:17 +0000
bugs 1353841
milestone 55.0a1
Bug 1353841 - add audioLevel to RTCMediaStreamTrackStats

This patch contains the webrtc.org changes and the media pipeline changes
necessary to get the audioLevel plumbed to PeerConnectionImpl, where it can
eventually be used to populate the field.

MozReview-Commit-ID: 5UuHayyVKZ3
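
For orientation before the diffs, here is a minimal standalone sketch of the
call pattern the TODO blocks in PeerConnectionImpl point at, with the conduit
stubbed out; StubAudioConduit and its canned value are illustrative stand-ins,
not the real MediaSessionConduit:

    // Hypothetical sketch, not part of this patch: how stats code could
    // consume the new conduit API once RTCMediaStreamTrackStats grows the
    // audioLevel field.
    #include <cstdio>

    struct StubAudioConduit {
      // Mirrors MediaSessionConduit::GetAudioEncoderStats from this patch:
      // returns true on success and writes a level normalized to [0.0, 1.0].
      bool GetAudioEncoderStats(double* audioLevel) {
        *audioLevel = 0.25;  // canned value standing in for the VoE query
        return true;
      }
    };

    int main() {
      StubAudioConduit conduit;
      double audioLevel;
      if (conduit.GetAudioEncoderStats(&audioLevel)) {
        // In the real stats code this would feed s.mAudioLevel.Construct(...).
        std::printf("audioLevel = %f\n", audioLevel);
      }
      return 0;
    }

The real getters gate on mEngineTransmitting/mEngineReceiving before querying
the VoE volume-control interface, as the AudioConduit diff below shows.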
media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
media/webrtc/signaling/src/media-conduit/AudioConduit.h
media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
media/webrtc/signaling/src/media-conduit/VideoConduit.h
media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
media/webrtc/trunk/webrtc/voice_engine/channel.cc
media/webrtc/trunk/webrtc/voice_engine/channel.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_volume_control.h
media/webrtc/trunk/webrtc/voice_engine/level_indicator.cc
media/webrtc/trunk/webrtc/voice_engine/level_indicator.h
media/webrtc/trunk/webrtc/voice_engine/output_mixer.cc
media/webrtc/trunk/webrtc/voice_engine/output_mixer.h
media/webrtc/trunk/webrtc/voice_engine/transmit_mixer.cc
media/webrtc/trunk/webrtc/voice_engine/transmit_mixer.h
media/webrtc/trunk/webrtc/voice_engine/voe_volume_control_impl.cc
media/webrtc/trunk/webrtc/voice_engine/voe_volume_control_impl.h
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -101,16 +101,17 @@ WebrtcAudioConduit::~WebrtcAudioConduit(
   mPtrVoENetwork = nullptr;
   mPtrVoEBase = nullptr;
   mPtrVoECodec = nullptr;
   mPtrVoEXmedia = nullptr;
   mPtrVoEProcessing = nullptr;
   mPtrVoEVideoSync = nullptr;
   mPtrVoERTP_RTCP = nullptr;
   mPtrRTP = nullptr;
+  mPtrVoEVolumeCtrl = nullptr;
 
   if (mVoiceEngine)
   {
     webrtc::VoiceEngine::Delete(mVoiceEngine);
   }
 }
 
 bool WebrtcAudioConduit::SetLocalSSRCs(const std::vector<unsigned int> & aSSRCs)
@@ -179,16 +180,31 @@ bool WebrtcAudioConduit::GetRTPStats(uns
   unsigned int maxJitterMs = 0;
   unsigned int discardedPackets;
   *jitterMs = 0;
   *cumulativeLost = 0;
   return !mPtrRTP->GetRTPStatistics(mChannel, *jitterMs, maxJitterMs,
                                     discardedPackets, *cumulativeLost);
 }
 
+bool WebrtcAudioConduit::GetAudioEncoderStats(double* audioLevel) {
+  if (!mEngineTransmitting) {
+    return false;
+  }
+  return !mPtrVoEVolumeCtrl->GetSpeechInputLevelNormalized(*audioLevel);
+}
+
+bool WebrtcAudioConduit::GetAudioDecoderStats(double* audioLevel) {
+  if (!mEngineReceiving) {
+    return false;
+  }
+  return !mPtrVoEVolumeCtrl->GetSpeechOutputLevelNormalized(mChannel,
+                                                            *audioLevel);
+}
+
 DOMHighResTimeStamp
 NTPtoDOMHighResTimeStamp(uint32_t ntpHigh, uint32_t ntpLow) {
   return (uint32_t(ntpHigh - webrtc::kNtpJan1970) +
           double(ntpLow) / webrtc::kMagicNtpFractionalUnit) * 1000;
 }
 
 bool WebrtcAudioConduit::GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp,
                                                uint32_t* jitterMs,
@@ -337,16 +353,24 @@ MediaConduitErrorCode WebrtcAudioConduit
   }
   if (!(mPtrRTP = webrtc::VoERTP_RTCP::GetInterface(mVoiceEngine)))
   {
     CSFLogError(logTag, "%s Unable to get audio RTP/RTCP interface ",
                 __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
+  if (!(mPtrVoEVolumeCtrl =
+        webrtc::VoEVolumeControl::GetInterface(mVoiceEngine)))
+  {
+    CSFLogError(logTag, "%s Unable to get audio volume control interface ",
+                __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
+
   if( (mChannel = mPtrVoEBase->CreateChannel()) == -1)
   {
     CSFLogError(logTag, "%s VoiceEngine Channel creation failed",__FUNCTION__);
     return kMediaConduitChannelError;
   }
 
   CSFLogDebug(logTag, "%s Channel Created %d ",__FUNCTION__, mChannel);
 
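
A side note on lifetime, sketched with stub types: mPtrVoEVolumeCtrl is held in
a ScopedCustomReleasePtr, so the nullptr assignment in the destructor above
releases the refcounted interface that GetInterface handed out. The wrapper
below is a simplified stand-in for that pattern, not the real Mozilla class:

    // Sketch of the acquire/release discipline behind mPtrVoEVolumeCtrl;
    // StubVoEVolumeControl and ScopedRelease are stand-ins.
    #include <cstdio>

    struct StubVoEVolumeControl {
      static StubVoEVolumeControl* GetInterface() {
        return new StubVoEVolumeControl();  // refcount notionally starts at 1
      }
      int Release() {
        delete this;  // refcount hit zero; the real API returns the count
        return 0;
      }
    };

    // Minimal scoped wrapper mirroring what ScopedCustomReleasePtr automates.
    class ScopedRelease {
     public:
      explicit ScopedRelease(StubVoEVolumeControl* p) : ptr_(p) {}
      ~ScopedRelease() { reset(); }
      void reset() {
        if (ptr_) {
          ptr_->Release();
          ptr_ = nullptr;
        }
      }
     private:
      StubVoEVolumeControl* ptr_;
    };

    int main() {
      ScopedRelease volumeCtrl(StubVoEVolumeControl::GetInterface());
      volumeCtrl.reset();  // same effect as "mPtrVoEVolumeCtrl = nullptr" above
      std::printf("interface released\n");
      return 0;
    }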
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -215,16 +215,18 @@ public:
                             double* bitrateStdDev,
                             uint32_t* discardedPackets) override
   {
     return false;
   }
   bool GetAVStats(int32_t* jitterBufferDelayMs,
                   int32_t* playoutBufferDelayMs,
                   int32_t* avSyncOffsetMs) override;
+  bool GetAudioEncoderStats(double* audioLevel) override;
+  bool GetAudioDecoderStats(double* audioLevel) override;
   bool GetRTPStats(unsigned int* jitterMs, unsigned int* cumulativeLost) override;
   bool GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp,
                              uint32_t* jitterMs,
                              uint32_t* packetsReceived,
                              uint64_t* bytesReceived,
                              uint32_t *cumulativeLost,
                              int32_t* rttMs) override;
   bool GetRTCPSenderReport(DOMHighResTimeStamp* timestamp,
@@ -274,16 +276,17 @@ private:
   ScopedCustomReleasePtr<webrtc::VoENetwork>   mPtrVoENetwork;
   ScopedCustomReleasePtr<webrtc::VoEBase>      mPtrVoEBase;
   ScopedCustomReleasePtr<webrtc::VoECodec>     mPtrVoECodec;
   ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mPtrVoEXmedia;
   ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mPtrVoEProcessing;
   ScopedCustomReleasePtr<webrtc::VoEVideoSync> mPtrVoEVideoSync;
   ScopedCustomReleasePtr<webrtc::VoERTP_RTCP>  mPtrVoERTP_RTCP;
   ScopedCustomReleasePtr<webrtc::VoERTP_RTCP>  mPtrRTP;
+  ScopedCustomReleasePtr<webrtc::VoEVolumeControl> mPtrVoEVolumeCtrl;
  //engine states of our interest
   mozilla::Atomic<bool> mEngineTransmitting; // If true => VoiceEngine Send-subsystem is up
   mozilla::Atomic<bool> mEngineReceiving;    // If true => VoiceEngine Receive-subsystem is up
                             // and playout is enabled
   // Keep track of each inserted RTP block and the time it was inserted
   // so we can estimate the clock time for a specific TimeStamp coming out
   // (for when we send data to MediaStreamTracks).  Blocks are aged out as needed.
   struct Processing {
--- a/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
@@ -259,16 +259,18 @@ public:
   virtual bool GetVideoDecoderStats(double* framerateMean,
                                     double* framerateStdDev,
                                     double* bitrateMean,
                                     double* bitrateStdDev,
                                     uint32_t* discardedPackets) = 0;
   virtual bool GetAVStats(int32_t* jitterBufferDelayMs,
                           int32_t* playoutBufferDelayMs,
                           int32_t* avSyncOffsetMs) = 0;
+  virtual bool GetAudioEncoderStats(double* audioLevel) = 0;
+  virtual bool GetAudioDecoderStats(double* audioLevel) = 0;
   virtual bool GetRTPStats(unsigned int* jitterMs,
                            unsigned int* cumulativeLost) = 0;
   virtual bool GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp,
                                      uint32_t* jitterMs,
                                      uint32_t* packetsReceived,
                                      uint64_t* bytesReceived,
                                      uint32_t* cumulativeLost,
                                      int32_t* rttMs) = 0;
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.h
@@ -309,16 +309,18 @@ public:
   bool GetVideoDecoderStats(double* framerateMean,
                             double* framerateStdDev,
                             double* bitrateMean,
                             double* bitrateStdDev,
                             uint32_t* discardedPackets) override;
   bool GetAVStats(int32_t* jitterBufferDelayMs,
                   int32_t* playoutBufferDelayMs,
                   int32_t* avSyncOffsetMs) override;
+  bool GetAudioEncoderStats(double* audioLevel) override { return false; }
+  bool GetAudioDecoderStats(double* audioLevel) override { return false; }
   bool GetRTPStats(unsigned int* jitterMs, unsigned int* cumulativeLost) override;
   bool GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp,
                              uint32_t* jitterMs,
                              uint32_t* packetsReceived,
                              uint64_t* bytesReceived,
                              uint32_t* cumulativeLost,
                              int32_t* rttMs) override;
   bool GetRTCPSenderReport(DOMHighResTimeStamp* timestamp,
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
@@ -3655,18 +3655,25 @@ PeerConnectionImpl::ExecuteStatsQuery_s(
             s.mSsrc.Construct(ssrc);
           }
           s.mMediaType.Construct(mediaType);
           s.mRemoteId.Construct(remoteId);
           s.mIsRemote = false;
           s.mPacketsSent.Construct(mp.rtp_packets_sent());
           s.mBytesSent.Construct(mp.rtp_bytes_sent());
 
-          // Lastly, fill in video encoder stats if this is video
-          if (!isAudio) {
+          // Fill in audio encoder stats if this is audio
+          if (isAudio) {
+            // TODO: @@NG audioLevel is actually part of
+            //  RTCMediaStreamTrackStats
+            // double audioLevel;
+            // if (mp.Conduit()->GetAudioEncoderStats(&audioLevel)) {
+            //   s.mAudioLevel.Construct(audioLevel);
+            // }
+          } else { // Lastly, fill in video encoder stats if this is video
             double framerateMean;
             double framerateStdDev;
             double bitrateMean;
             double bitrateStdDev;
             uint32_t droppedFrames;
             if (mp.Conduit()->GetVideoEncoderStats(&framerateMean,
                                                    &framerateStdDev,
                                                    &bitrateMean,
@@ -3743,18 +3750,25 @@ PeerConnectionImpl::ExecuteStatsQuery_s(
           int32_t avSyncDelta;
           if (mp.Conduit()->GetAVStats(&jitterBufferDelay,
                                        &playoutBufferDelay,
                                        &avSyncDelta)) {
             s.mMozJitterBufferDelay.Construct(jitterBufferDelay);
             s.mMozAvSyncDelay.Construct(avSyncDelta);
           }
         }
-        // Lastly, fill in video decoder stats if this is video
-        if (!isAudio) {
+        // Fill in audio decoder stats if this is audio
+        if (isAudio) {
+          // TODO: @@NG audioLevel is actually part of
+          //  RTCMediaStreamTrackStats
+          // double audioLevel;
+          // if (mp.Conduit()->GetAudioDecoderStats(&audioLevel)) {
+          //   s.mAudioLevel.Construct(audioLevel);
+          // }
+        } else { // Lastly, fill in video decoder stats if this is video
           double framerateMean;
           double framerateStdDev;
           double bitrateMean;
           double bitrateStdDev;
           uint32_t discardedPackets;
           if (mp.Conduit()->GetVideoDecoderStats(&framerateMean,
                                                  &framerateStdDev,
                                                  &bitrateMean,
--- a/media/webrtc/trunk/webrtc/voice_engine/channel.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/channel.cc
@@ -2448,16 +2448,23 @@ int
 Channel::GetSpeechOutputLevelFullRange(uint32_t& level) const
 {
     int16_t currentLevel = _outputAudioLevel.LevelFullRange();
     level = static_cast<int32_t> (currentLevel);
     return 0;
 }
 
 int
+Channel::GetSpeechOutputLevelNormalized(double& level) const
+{
+    level = _outputAudioLevel.LevelNormalized();
+    return 0;
+}
+
+int
 Channel::SetMute(bool enable)
 {
     CriticalSectionScoped cs(&volume_settings_critsect_);
     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                "Channel::SetMute(enable=%d)", enable);
     _mute = enable;
     return 0;
 }
--- a/media/webrtc/trunk/webrtc/voice_engine/channel.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/channel.h
@@ -267,16 +267,17 @@ public:
     int RegisterExternalMediaProcessing(ProcessingTypes type,
                                         VoEMediaProcess& processObject);
     int DeRegisterExternalMediaProcessing(ProcessingTypes type);
     int SetExternalMixing(bool enabled);
 
     // VoEVolumeControl
     int GetSpeechOutputLevel(uint32_t& level) const;
     int GetSpeechOutputLevelFullRange(uint32_t& level) const;
+    int GetSpeechOutputLevelNormalized(double& level) const;
     int SetMute(bool enable);
     bool Mute() const;
     int SetOutputVolumePan(float left, float right);
     int GetOutputVolumePan(float& left, float& right) const;
     int SetChannelOutputVolumeScaling(float scaling);
     int GetChannelOutputVolumeScaling(float& scaling) const;
 
     // VoENetEqStats
--- a/media/webrtc/trunk/webrtc/voice_engine/include/voe_volume_control.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/include/voe_volume_control.h
@@ -85,16 +85,25 @@ class WEBRTC_DLLEXPORT VoEVolumeControl 
   // Gets the microphone speech |level|, mapped linearly to the range
   // [0,32768].
   virtual int GetSpeechInputLevelFullRange(unsigned int& level) = 0;
 
   // Gets the speaker speech |level|, mapped linearly to the range [0,32768].
   virtual int GetSpeechOutputLevelFullRange(int channel,
                                             unsigned int& level) = 0;
 
+  // Gets the microphone speech |level|, mapped to the range [0.0, 1.0]
+  // using the RMS computation from Appendix A of RFC 6465.
+  virtual int GetSpeechInputLevelNormalized(double& level) = 0;
+
+  // Gets the speaker speech |level|, mapped to the range [0.0, 1.0]
+  // using the RMS computation from Appendix A of RFC 6465.
+  virtual int GetSpeechOutputLevelNormalized(int channel,
+                                             double& level) = 0;
+
   // Sets a volume |scaling| applied to the outgoing signal of a specific
   // channel. Valid scale range is [0.0, 10.0].
   virtual int SetChannelOutputVolumeScaling(int channel, float scaling) = 0;
 
   // Gets the current volume scaling for a specified |channel|.
   virtual int GetChannelOutputVolumeScaling(int channel, float& scaling) = 0;
 
   // Scales volume of the |left| and |right| channels independently.
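
Before the level_indicator changes that implement it, a standalone sketch of
the normalization these comments describe; NormalizedRms is a hypothetical
helper mirroring the loop added to AudioLevel::ComputeLevel, so full scale maps
to ~1.0 and silence to 0.0:

    // Illustration of the RMS computation from Appendix A of RFC 6465 over
    // int16 samples scaled into [-1.0, 1.0].
    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <limits>
    #include <vector>

    double NormalizedRms(const std::vector<int16_t>& samples) {
      double sumSquares = 0;
      for (int16_t s : samples) {
        // Cast before dividing so full scale maps to 1.0 instead of
        // truncating to zero under integer division.
        double sample =
            static_cast<double>(s) / std::numeric_limits<int16_t>::max();
        sumSquares += sample * sample;
      }
      return samples.empty() ? 0.0 : std::sqrt(sumSquares / samples.size());
    }

    int main() {
      std::vector<int16_t> fullScale(160, std::numeric_limits<int16_t>::max());
      std::vector<int16_t> halfScale(160,
                                     std::numeric_limits<int16_t>::max() / 2);
      std::printf("full-scale rms = %f\n", NormalizedRms(fullScale));  // ~1.0
      std::printf("half-scale rms = %f\n", NormalizedRms(halfScale));  // ~0.5
      return 0;
    }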
--- a/media/webrtc/trunk/webrtc/voice_engine/level_indicator.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/level_indicator.cc
@@ -3,16 +3,18 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <cmath>
+#include <limits>
+
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/include/module_common_types.h"
 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 #include "webrtc/voice_engine/level_indicator.h"
 
 namespace webrtc {
 
 namespace voe {
@@ -24,17 +26,18 @@ const int8_t permutation[33] =
     {0,1,2,3,4,4,5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,8,9,9,9,9,9,9,9,9,9,9,9};
 
 
 AudioLevel::AudioLevel() :
     _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
     _absMax(0),
     _count(0),
     _currentLevel(0),
-    _currentLevelFullRange(0) {
+    _currentLevelFullRange(0),
+    _currentLevelNormalized(0) {
 }
 
 AudioLevel::~AudioLevel() {
     delete &_critSect;
 }
 
 void AudioLevel::Clear()
 {
@@ -49,26 +52,41 @@ void AudioLevel::ComputeLevel(const Audi
 {
     int16_t absValue(0);
 
     // Check speech level (works for 2 channels as well)
     absValue = WebRtcSpl_MaxAbsValueW16(
         audioFrame.data_,
         audioFrame.samples_per_channel_*audioFrame.num_channels_);
 
+    // Calculate the RMS audio level via the method in Appendix A of RFC 6465.
+    double rms = 0;
+    size_t length = audioFrame.samples_per_channel_ * audioFrame.num_channels_;
+    for (size_t index = 0; index < length; index++) {
+        // Cast to double before dividing; otherwise this is integer division
+        // and every sample below full scale truncates to zero.
+        double sample = static_cast<double>(audioFrame.data_[index])
+                        / std::numeric_limits<int16_t>::max();
+        rms += sample * sample;
+    }
+    if (length) {
+        rms = std::sqrt(rms / length);
+    }
+
     // Protect member access using a lock since this method is called on a
     // dedicated audio thread in the RecordedDataIsAvailable() callback.
     CriticalSectionScoped cs(&_critSect);
 
     if (absValue > _absMax)
     _absMax = absValue;
 
     // Update level approximately 10 times per second
     if (_count++ == kUpdateFrequency)
     {
+        _currentLevelNormalized = rms;
+
         _currentLevelFullRange = _absMax;
 
         _count = 0;
 
        // Highest value for an int16_t is 0x7fff = 32767
         // Divide with 1000 to get in the range of 0-32 which is the range of
         // the permutation vector
         int32_t position = _absMax/1000;
@@ -93,11 +111,16 @@ int8_t AudioLevel::Level() const
 }
 
 int16_t AudioLevel::LevelFullRange() const
 {
     CriticalSectionScoped cs(&_critSect);
     return _currentLevelFullRange;
 }
 
+double AudioLevel::LevelNormalized() const {
+    CriticalSectionScoped cs(&_critSect);
+    return _currentLevelNormalized;
+}
+
 }  // namespace voe
 
 }  // namespace webrtc
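
One behavioral detail worth noting from ComputeLevel above: the normalized
level is only published once every kUpdateFrequency frames, so with 10 ms
frames readers observe roughly ten updates per second no matter how often they
poll. A minimal sketch of that latching pattern, with simplified stand-in
types and values:

    // LatchedLevel is a simplified stand-in for AudioLevel's update cadence.
    #include <cstdio>

    class LatchedLevel {
     public:
      void ComputeLevel(double frameRms) {
        lastRms_ = frameRms;
        if (count_++ == kUpdateFrequency) {
          currentLevel_ = lastRms_;  // publish ~10x/sec for 10 ms frames
          count_ = 0;
        }
      }
      double LevelNormalized() const { return currentLevel_; }

     private:
      enum { kUpdateFrequency = 10 };
      int count_ = 0;
      double lastRms_ = 0.0;
      double currentLevel_ = 0.0;
    };

    int main() {
      LatchedLevel level;
      for (int frame = 0; frame < 23; frame++) {
        level.ComputeLevel(frame / 22.0);
        // The printed level stays flat between updates, then jumps.
        std::printf("frame %2d -> level %.2f\n", frame,
                    level.LevelNormalized());
      }
      return 0;
    }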
--- a/media/webrtc/trunk/webrtc/voice_engine/level_indicator.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/level_indicator.h
@@ -25,31 +25,35 @@ class AudioLevel
 public:
     AudioLevel();
     virtual ~AudioLevel();
 
     // Called on "API thread(s)" from APIs like VoEBase::CreateChannel(),
     // VoEBase::StopSend(), VoEVolumeControl::GetSpeechOutputLevel().
     int8_t Level() const;
     int16_t LevelFullRange() const;
+    // Returns a level normalized to [0.0, 1.0] using the RMS computation
+    // from Appendix A of RFC 6465.
+    double LevelNormalized() const;
     void Clear();
 
     // Called on a native capture audio thread (platform dependent) from the
     // AudioTransport::RecordedDataIsAvailable() callback.
     // In Chrome, this method is called on the AudioInputDevice thread.
     void ComputeLevel(const AudioFrame& audioFrame);
 
 private:
     enum { kUpdateFrequency = 10};
 
     CriticalSectionWrapper& _critSect;
 
     int16_t _absMax;
     int16_t _count;
     int8_t _currentLevel;
     int16_t _currentLevelFullRange;
+    double _currentLevelNormalized;
 };
 
 }  // namespace voe
 
 }  // namespace webrtc
 
 #endif // WEBRTC_VOICE_ENGINE_LEVEL_INDICATOR_H
--- a/media/webrtc/trunk/webrtc/voice_engine/output_mixer.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/output_mixer.cc
@@ -248,16 +248,25 @@ OutputMixer::GetSpeechOutputLevelFullRan
     int16_t currentLevel = _audioLevel.LevelFullRange();
     level = static_cast<uint32_t> (currentLevel);
     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
                  "GetSpeechOutputLevelFullRange() => level=%u", level);
     return 0;
 }
 
 int
+OutputMixer::GetSpeechOutputLevelNormalized(double& level)
+{
+    level = _audioLevel.LevelNormalized();
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetSpeechOutputLevelNormalized() => level=%f", level);
+    return 0;
+}
+
+int
 OutputMixer::SetOutputVolumePan(float left, float right)
 {
     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                  "OutputMixer::SetOutputVolumePan()");
     _panLeft = left;
     _panRight = right;
     return 0;
 }
--- a/media/webrtc/trunk/webrtc/voice_engine/output_mixer.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/output_mixer.h
@@ -66,16 +66,18 @@ public:
     int GetMixedAudio(int sample_rate_hz, size_t num_channels,
                       AudioFrame* audioFrame);
 
     // VoEVolumeControl
     int GetSpeechOutputLevel(uint32_t& level);
 
     int GetSpeechOutputLevelFullRange(uint32_t& level);
 
+    int GetSpeechOutputLevelNormalized(double& level);
+
     int SetOutputVolumePan(float left, float right);
 
     int GetOutputVolumePan(float& left, float& right);
 
     // VoEFile
     int StartRecordingPlayout(const char* fileName,
                               const CodecInst* codecInst);
 
--- a/media/webrtc/trunk/webrtc/voice_engine/transmit_mixer.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/transmit_mixer.cc
@@ -1115,16 +1115,22 @@ int8_t TransmitMixer::AudioLevel() const
 }
 
 int16_t TransmitMixer::AudioLevelFullRange() const
 {
     // Speech + file level [0,32767]
     return _audioLevel.LevelFullRange();
 }
 
+double TransmitMixer::AudioLevelNormalized() const
+{
+    // Speech + file level [0.0, 1.0]
+    return _audioLevel.LevelNormalized();
+}
+
 bool TransmitMixer::IsRecordingCall()
 {
     return _fileCallRecording;
 }
 
 bool TransmitMixer::IsRecordingMic()
 {
     CriticalSectionScoped cs(&_critSect);
--- a/media/webrtc/trunk/webrtc/voice_engine/transmit_mixer.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/transmit_mixer.h
@@ -89,16 +89,18 @@ public:
     int SetMute(bool enable);
 
     bool Mute() const;
 
     int8_t AudioLevel() const;
 
     int16_t AudioLevelFullRange() const;
 
+    double AudioLevelNormalized() const;
+
     bool IsRecordingCall();
 
     bool IsRecordingMic();
 
     int StartPlayingFileAsMicrophone(const char* fileName,
                                      bool loop,
                                      FileFormats format,
                                      int startPosition,
--- a/media/webrtc/trunk/webrtc/voice_engine/voe_volume_control_impl.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/voe_volume_control_impl.cc
@@ -298,16 +298,46 @@ int VoEVolumeControlImpl::GetSpeechOutpu
           "GetSpeechOutputLevelFullRange() failed to locate channel");
       return -1;
     }
     channelPtr->GetSpeechOutputLevelFullRange((uint32_t&)level);
   }
   return 0;
 }
 
+int VoEVolumeControlImpl::GetSpeechInputLevelNormalized(double& level) {
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  level = _shared->transmit_mixer()->AudioLevelNormalized();
+  return 0;
+}
+
+int VoEVolumeControlImpl::GetSpeechOutputLevelNormalized(int channel,
+                                                         double& level) {
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (channel == -1) {
+    return _shared->output_mixer()->GetSpeechOutputLevelNormalized(level);
+  } else {
+    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+    voe::Channel* channelPtr = ch.channel();
+    if (channelPtr == NULL) {
+      _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+          "GetSpeechOutputLevelNormalized() failed to locate channel");
+      return -1;
+    }
+    channelPtr->GetSpeechOutputLevelNormalized(level);
+  }
+  return 0;
+}
+
 int VoEVolumeControlImpl::SetChannelOutputVolumeScaling(int channel,
                                                         float scaling) {
   WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "SetChannelOutputVolumeScaling(channel=%d, scaling=%3.2f)",
                channel, scaling);
   if (!_shared->statistics().Initialized()) {
     _shared->SetLastError(VE_NOT_INITED, kTraceError);
     return -1;
--- a/media/webrtc/trunk/webrtc/voice_engine/voe_volume_control_impl.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/voe_volume_control_impl.h
@@ -34,16 +34,20 @@ class VoEVolumeControlImpl : public VoEV
   int GetSpeechInputLevel(unsigned int& level) override;
 
   int GetSpeechOutputLevel(int channel, unsigned int& level) override;
 
   int GetSpeechInputLevelFullRange(unsigned int& level) override;
 
   int GetSpeechOutputLevelFullRange(int channel, unsigned int& level) override;
 
+  int GetSpeechInputLevelNormalized(double& level) override;
+
+  int GetSpeechOutputLevelNormalized(int channel, double& level) override;
+
   int SetChannelOutputVolumeScaling(int channel, float scaling) override;
 
   int GetChannelOutputVolumeScaling(int channel, float& scaling) override;
 
   int SetOutputVolumePan(int channel, float left, float right) override;
 
   int GetOutputVolumePan(int channel, float& left, float& right) override;
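
To close, a sketch of the channel dispatch the new
GetSpeechOutputLevelNormalized performs; FakeVoE and its levels are stand-ins
for the real output mixer and channel manager: channel == -1 reads the mixed
playout level, any other value is resolved per channel and fails like the
VE_CHANNEL_NOT_VALID path if missing.

    // Stand-in model of VoEVolumeControlImpl::GetSpeechOutputLevelNormalized.
    #include <cstdio>
    #include <map>

    struct FakeVoE {
      double mixedLevel = 0.4;
      std::map<int, double> channelLevels{{1, 0.1}, {2, 0.7}};

      // Same convention as the patch: 0 on success, -1 for a bad channel.
      int GetSpeechOutputLevelNormalized(int channel, double& level) {
        if (channel == -1) {
          level = mixedLevel;  // OutputMixer path
          return 0;
        }
        auto it = channelLevels.find(channel);
        if (it == channelLevels.end()) {
          return -1;  // corresponds to VE_CHANNEL_NOT_VALID
        }
        level = it->second;  // per-channel path
        return 0;
      }
    };

    int main() {
      FakeVoE voe;
      double level;
      if (voe.GetSpeechOutputLevelNormalized(-1, level) == 0) {
        std::printf("mixed output level: %f\n", level);
      }
      if (voe.GetSpeechOutputLevelNormalized(2, level) == 0) {
        std::printf("channel 2 level: %f\n", level);
      }
      return 0;
    }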