--- a/media/webrtc/signaling/gtest/jsep_session_unittest.cpp
+++ b/media/webrtc/signaling/gtest/jsep_session_unittest.cpp
@@ -4103,80 +4103,91 @@ TEST_F(JsepSessionTest, TestIceOptions)
ASSERT_EQ("trickle", mSessionAns->GetIceOptions()[0]);
}
TEST_F(JsepSessionTest, TestExtmap)
{
AddTracks(*mSessionOff, "audio");
AddTracks(*mSessionAns, "audio");
// ssrc-audio-level will be extmap 1 for both
- mSessionOff->AddAudioRtpExtension("foo"); // Default mapping of 2
- mSessionOff->AddAudioRtpExtension("bar"); // Default mapping of 3
- mSessionAns->AddAudioRtpExtension("bar"); // Default mapping of 2
+ mSessionOff->AddAudioRtpExtension("foo"); // Default mapping of 3
+ mSessionOff->AddAudioRtpExtension("bar"); // Default mapping of 4
+ mSessionAns->AddAudioRtpExtension("bar"); // Default mapping of 3
std::string offer = CreateOffer();
SetLocalOffer(offer, CHECK_SUCCESS);
SetRemoteOffer(offer, CHECK_SUCCESS);
std::string answer = CreateAnswer();
SetLocalAnswer(answer, CHECK_SUCCESS);
SetRemoteAnswer(answer, CHECK_SUCCESS);
UniquePtr<Sdp> parsedOffer(Parse(offer));
ASSERT_EQ(1U, parsedOffer->GetMediaSectionCount());
auto& offerMediaAttrs = parsedOffer->GetMediaSection(0).GetAttributeList();
ASSERT_TRUE(offerMediaAttrs.HasAttribute(SdpAttribute::kExtmapAttribute));
auto& offerExtmap = offerMediaAttrs.GetExtmap().mExtmaps;
- ASSERT_EQ(3U, offerExtmap.size());
+ ASSERT_EQ(4U, offerExtmap.size());
ASSERT_EQ("urn:ietf:params:rtp-hdrext:ssrc-audio-level",
offerExtmap[0].extensionname);
ASSERT_EQ(1U, offerExtmap[0].entry);
- ASSERT_EQ("foo", offerExtmap[1].extensionname);
+ ASSERT_EQ("urn:ietf:params:rtp-hdrext:sdes:mid",
+ offerExtmap[1].extensionname);
ASSERT_EQ(2U, offerExtmap[1].entry);
- ASSERT_EQ("bar", offerExtmap[2].extensionname);
+ ASSERT_EQ("foo", offerExtmap[2].extensionname);
ASSERT_EQ(3U, offerExtmap[2].entry);
+ ASSERT_EQ("bar", offerExtmap[3].extensionname);
+ ASSERT_EQ(4U, offerExtmap[3].entry);
UniquePtr<Sdp> parsedAnswer(Parse(answer));
ASSERT_EQ(1U, parsedAnswer->GetMediaSectionCount());
auto& answerMediaAttrs = parsedAnswer->GetMediaSection(0).GetAttributeList();
ASSERT_TRUE(answerMediaAttrs.HasAttribute(SdpAttribute::kExtmapAttribute));
auto& answerExtmap = answerMediaAttrs.GetExtmap().mExtmaps;
- ASSERT_EQ(1U, answerExtmap.size());
+ ASSERT_EQ(2U, answerExtmap.size());
+ ASSERT_EQ("urn:ietf:params:rtp-hdrext:sdes:mid",
+ answerExtmap[0].extensionname);
+ ASSERT_EQ(2U, answerExtmap[0].entry);
// We ensure that the entry for "bar" matches what was in the offer
- ASSERT_EQ("bar", answerExtmap[0].extensionname);
- ASSERT_EQ(3U, answerExtmap[0].entry);
+ ASSERT_EQ("bar", answerExtmap[1].extensionname);
+ ASSERT_EQ(4U, answerExtmap[1].entry);
}
TEST_F(JsepSessionTest, TestExtmapWithDuplicates)
{
AddTracks(*mSessionOff, "audio");
AddTracks(*mSessionAns, "audio");
// ssrc-audio-level will be extmap 1 for both
- mSessionOff->AddAudioRtpExtension("foo"); // Default mapping of 2
- mSessionOff->AddAudioRtpExtension("bar"); // Default mapping of 3
+ mSessionOff->AddAudioRtpExtension("foo"); // Default mapping of 3
+ mSessionOff->AddAudioRtpExtension("bar"); // Default mapping of 4
mSessionOff->AddAudioRtpExtension("bar"); // Should be ignored
mSessionOff->AddAudioRtpExtension("bar"); // Should be ignored
- mSessionOff->AddAudioRtpExtension("baz"); // Default mapping of 4
+ mSessionOff->AddAudioRtpExtension("baz"); // Default mapping of 5
mSessionOff->AddAudioRtpExtension("bar"); // Should be ignored
std::string offer = CreateOffer();
UniquePtr<Sdp> parsedOffer(Parse(offer));
ASSERT_EQ(1U, parsedOffer->GetMediaSectionCount());
auto& offerMediaAttrs = parsedOffer->GetMediaSection(0).GetAttributeList();
ASSERT_TRUE(offerMediaAttrs.HasAttribute(SdpAttribute::kExtmapAttribute));
auto& offerExtmap = offerMediaAttrs.GetExtmap().mExtmaps;
- ASSERT_EQ(4U, offerExtmap.size());
+ ASSERT_EQ(5U, offerExtmap.size());
ASSERT_EQ("urn:ietf:params:rtp-hdrext:ssrc-audio-level",
offerExtmap[0].extensionname);
ASSERT_EQ(1U, offerExtmap[0].entry);
- ASSERT_EQ("foo", offerExtmap[1].extensionname);
+ ASSERT_EQ("urn:ietf:params:rtp-hdrext:sdes:mid",
+ offerExtmap[1].extensionname);
ASSERT_EQ(2U, offerExtmap[1].entry);
- ASSERT_EQ("bar", offerExtmap[2].extensionname);
+ ASSERT_EQ("foo", offerExtmap[2].extensionname);
ASSERT_EQ(3U, offerExtmap[2].entry);
+ ASSERT_EQ("bar", offerExtmap[3].extensionname);
+ ASSERT_EQ(4U, offerExtmap[3].entry);
+ ASSERT_EQ("baz", offerExtmap[4].extensionname);
+ ASSERT_EQ(5U, offerExtmap[4].entry);
}
TEST_F(JsepSessionTest, TestRtcpFbStar)
{
AddTracks(*mSessionOff, "video");
AddTracks(*mSessionAns, "video");
--- a/media/webrtc/signaling/src/jsep/JsepSessionImpl.cpp
+++ b/media/webrtc/signaling/src/jsep/JsepSessionImpl.cpp
@@ -2384,16 +2384,18 @@ JsepSessionImpl::SetupDefaultCodecs()
red->UpdateRedundantEncodings(mSupportedCodecs.values);
}
void
JsepSessionImpl::SetupDefaultRtpExtensions()
{
AddAudioRtpExtension(webrtc::RtpExtension::kAudioLevelUri,
SdpDirectionAttribute::Direction::kSendonly);
+ AddAudioRtpExtension(webrtc::RtpExtension::kMIdUri,
+ SdpDirectionAttribute::Direction::kSendrecv);
AddVideoRtpExtension(webrtc::RtpExtension::kAbsSendTimeUri,
SdpDirectionAttribute::Direction::kSendrecv);
AddVideoRtpExtension(webrtc::RtpExtension::kTimestampOffsetUri,
SdpDirectionAttribute::Direction::kSendrecv);
AddVideoRtpExtension(webrtc::RtpExtension::kMIdUri,
SdpDirectionAttribute::Direction::kSendrecv);
}
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -161,18 +161,20 @@ bool WebrtcAudioConduit::SetLocalCNAME(c
char temp[256];
strncpy(temp, cname, sizeof(temp) - 1);
temp[sizeof(temp) - 1] = 0;
return !mPtrRTP->SetRTCP_CNAME(mChannel, temp);
}
bool WebrtcAudioConduit::SetLocalMID(const std::string& mid)
{
- // !mjf! stubbed out for now
- return false;
+ if (mPtrRTP->SetLocalMID(mChannel, mid.c_str())) {
+ return false;
+ }
+ return true;
}
bool WebrtcAudioConduit::GetSendPacketTypeStats(
webrtc::RtcpPacketTypeCounter* aPacketCounts)
{
if (!mEngineTransmitting) {
return false;
}
@@ -577,31 +579,46 @@ WebrtcAudioConduit::ConfigureRecvMediaCo
condError = StartReceiving();
if (condError != kMediaConduitNoError) {
return condError;
}
DumpCodecDB();
return kMediaConduitNoError;
}
+
MediaConduitErrorCode
WebrtcAudioConduit::EnableAudioLevelExtension(bool enabled, uint8_t id)
{
CSFLogDebug(logTag, "%s %d %d ", __FUNCTION__, enabled, id);
if (mPtrVoERTP_RTCP->SetSendAudioLevelIndicationStatus(mChannel, enabled, id) == -1)
{
CSFLogError(logTag, "%s SetSendAudioLevelIndicationStatus Failed", __FUNCTION__);
return kMediaConduitUnknownError;
}
return kMediaConduitNoError;
}
MediaConduitErrorCode
+WebrtcAudioConduit::EnableMIDExtension(bool enabled, uint8_t id)
+{
+ CSFLogDebug(logTag, "%s %d %d ", __FUNCTION__, enabled, id);
+
+ if (mPtrVoERTP_RTCP->SetSendMIDStatus(mChannel, enabled, id) == -1)
+ {
+ CSFLogError(logTag, "%s SetSendMIDStatus Failed", __FUNCTION__);
+ return kMediaConduitUnknownError;
+ }
+
+ return kMediaConduitNoError;
+}
+
+MediaConduitErrorCode
WebrtcAudioConduit::SendAudioFrame(const int16_t audio_data[],
int32_t lengthSamples, // per channel
int32_t samplingFreqHz,
uint32_t channels,
int32_t capture_delay)
{
CSFLogDebug(logTag, "%s ", __FUNCTION__);
// Following checks need to be performed
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -92,16 +92,17 @@ public:
*/
virtual MediaConduitErrorCode ConfigureRecvMediaCodecs(
const std::vector<AudioCodecConfig* >& codecConfigList) override;
/**
* Function to enable the audio level extension
* @param enabled: enable extension
*/
virtual MediaConduitErrorCode EnableAudioLevelExtension(bool enabled, uint8_t id) override;
+ virtual MediaConduitErrorCode EnableMIDExtension(bool enabled, uint8_t id) override;
/**
* Register External Transport to this Conduit. RTP and RTCP frames from the VoiceEngine
* shall be passed to the registered transport for transporting externally.
*/
virtual MediaConduitErrorCode SetTransmitterTransport(RefPtr<TransportInterface> aTransport) override;
virtual MediaConduitErrorCode SetReceiverTransport(RefPtr<TransportInterface> aTransport) override;
--- a/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
@@ -511,16 +511,17 @@ public:
const std::vector<AudioCodecConfig* >& recvCodecConfigList) = 0;
/**
* Function to enable the audio level extension
* @param enabled: enable extension
* @param id: id to be used for this rtp header extension
* NOTE: See AudioConduit for more information
*/
virtual MediaConduitErrorCode EnableAudioLevelExtension(bool enabled, uint8_t id) = 0;
+ virtual MediaConduitErrorCode EnableMIDExtension(bool enabled, uint8_t id) = 0;
virtual bool SetDtmfPayloadType(unsigned char type, int freq) = 0;
virtual bool InsertDTMFTone(int channel, int eventCode, bool outOfBand,
int lengthMs, int attenuationDb) = 0;
};
}
--- a/media/webrtc/signaling/src/peerconnection/MediaPipelineFactory.cpp
+++ b/media/webrtc/signaling/src/peerconnection/MediaPipelineFactory.cpp
@@ -726,45 +726,60 @@ MediaPipelineFactory::GetOrCreateAudioCo
if (!ssrcs.empty()) {
if (!conduit->SetLocalSSRCs(ssrcs)) {
MOZ_MTLOG(ML_ERROR, "SetLocalSSRCs failed");
return NS_ERROR_FAILURE;
}
}
conduit->SetLocalCNAME(aTrack.GetCNAME().c_str());
+ conduit->SetLocalMID(aTrackPair.mRtpTransport->mTransportId);
for (auto value: configs.values) {
if (value->mName == "telephone-event") {
// we have a telephone event codec, so we need to make sure
// the dynamic pt is set properly
conduit->SetDtmfPayloadType(value->mType, value->mFreq);
break;
}
}
auto error = conduit->ConfigureSendMediaCodec(configs.values[0]);
if (error) {
MOZ_MTLOG(ML_ERROR, "ConfigureSendMediaCodec failed: " << error);
return NS_ERROR_FAILURE;
}
+ // Should these be genericized like they are in the video conduit case?
const SdpExtmapAttributeList::Extmap* audioLevelExt =
aTrack.GetNegotiatedDetails()->GetExt(
webrtc::RtpExtension::kAudioLevelUri);
if (audioLevelExt) {
MOZ_MTLOG(ML_DEBUG, "Calling EnableAudioLevelExtension");
error = conduit->EnableAudioLevelExtension(true, audioLevelExt->entry);
if (error) {
MOZ_MTLOG(ML_ERROR, "EnableAudioLevelExtension failed: " << error);
return NS_ERROR_FAILURE;
}
}
+
+ const SdpExtmapAttributeList::Extmap* midExt =
+ aTrack.GetNegotiatedDetails()->GetExt(webrtc::RtpExtension::kMIdUri);
+
+ if (midExt) {
+ MOZ_MTLOG(ML_DEBUG, "Calling EnableMIDExtension");
+ error = conduit->EnableMIDExtension(true, midExt->entry);
+
+ if (error) {
+ MOZ_MTLOG(ML_ERROR, "EnableMIDExtension failed: " << error);
+ return NS_ERROR_FAILURE;
+ }
+ }
}
*aConduitp = conduit;
return NS_OK;
}
nsresult
--- a/media/webrtc/trunk/webrtc/config.cc
+++ b/media/webrtc/trunk/webrtc/config.cc
@@ -83,17 +83,18 @@ const int RtpExtension::kRepairedRtpStre
const char* RtpExtension::kMIdUri =
"urn:ietf:params:rtp-hdrext:sdes:mid";
const int RtpExtension::kMIdDefaultId = 9;
bool RtpExtension::IsSupportedForAudio(const std::string& uri) {
return uri == webrtc::RtpExtension::kAudioLevelUri ||
uri == webrtc::RtpExtension::kTransportSequenceNumberUri ||
uri == webrtc::RtpExtension::kRtpStreamIdUri ||
- uri == webrtc::RtpExtension::kRepairedRtpStreamIdUri;
+ uri == webrtc::RtpExtension::kRepairedRtpStreamIdUri ||
+ uri == webrtc::RtpExtension::kMIdUri;
}
bool RtpExtension::IsSupportedForVideo(const std::string& uri) {
return uri == webrtc::RtpExtension::kTimestampOffsetUri ||
uri == webrtc::RtpExtension::kAbsSendTimeUri ||
uri == webrtc::RtpExtension::kVideoRotationUri ||
uri == webrtc::RtpExtension::kTransportSequenceNumberUri ||
uri == webrtc::RtpExtension::kPlayoutDelayUri ||
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
@@ -438,17 +438,18 @@ bool RTPSender::SendOutgoingData(FrameTy
bool result;
if (audio_configured_) {
TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", rtp_timestamp, "Send", "type",
FrameTypeToString(frame_type));
assert(frame_type == kAudioFrameSpeech || frame_type == kAudioFrameCN ||
frame_type == kEmptyFrame);
result = audio_->SendAudio(frame_type, payload_type, rtp_timestamp,
- payload_data, payload_size, fragmentation);
+ payload_data, payload_size, fragmentation,
+ &mId);
} else {
TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", capture_time_ms,
"Send", "type", FrameTypeToString(frame_type));
assert(frame_type != kAudioFrameSpeech && frame_type != kAudioFrameCN);
if (frame_type == kEmptyFrame)
return true;
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
@@ -118,17 +118,18 @@ bool RTPSenderAudio::MarkerBit(FrameType
return marker_bit;
}
bool RTPSenderAudio::SendAudio(FrameType frame_type,
int8_t payload_type,
uint32_t rtp_timestamp,
const uint8_t* payload_data,
size_t payload_size,
- const RTPFragmentationHeader* fragmentation) {
+ const RTPFragmentationHeader* fragmentation,
+ const StreamId* mId) {
// From RFC 4733:
// A source has wide latitude as to how often it sends event updates. A
// natural interval is the spacing between non-event audio packets. [...]
// Alternatively, a source MAY decide to use a different spacing for event
// updates, with a value of 50 ms RECOMMENDED.
constexpr int kDtmfIntervalTimeMs = 50;
uint8_t audio_level_dbov = 0;
uint32_t dtmf_payload_freq = 0;
@@ -221,16 +222,20 @@ bool RTPSenderAudio::SendAudio(FrameType
packet->SetMarker(MarkerBit(frame_type, payload_type));
packet->SetPayloadType(payload_type);
packet->SetTimestamp(rtp_timestamp);
packet->set_capture_time_ms(clock_->TimeInMilliseconds());
// Update audio level extension, if included.
packet->SetExtension<AudioLevel>(frame_type == kAudioFrameSpeech,
audio_level_dbov);
+ if (mId && !mId->empty()) {
+ packet->SetExtension<MId>(*mId);
+ }
+
if (fragmentation && fragmentation->fragmentationVectorSize > 0) {
// Use the fragment info if we have one.
uint8_t* payload =
packet->AllocatePayload(1 + fragmentation->fragmentationLength[0]);
if (!payload) // Too large payload buffer.
return false;
payload[0] = fragmentation->fragmentationPlType[0];
memcpy(payload + 1, payload_data + fragmentation->fragmentationOffset[0],
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
@@ -35,17 +35,18 @@ class RTPSenderAudio {
uint32_t rate,
RtpUtility::Payload** payload);
bool SendAudio(FrameType frame_type,
int8_t payload_type,
uint32_t capture_timestamp,
const uint8_t* payload_data,
size_t payload_size,
- const RTPFragmentationHeader* fragmentation);
+ const RTPFragmentationHeader* fragmentation,
+ const StreamId* mId);
// Store the audio level in dBov for
// header-extension-for-audio-level-indication.
// Valid range is [0,100]. Actual value is negative.
int32_t SetAudioLevel(uint8_t level_dbov);
// Send a DTMF tone using RFC 2833 (4733)
int32_t SendTelephoneEvent(uint8_t key, uint16_t time_ms, uint8_t level);
--- a/media/webrtc/trunk/webrtc/voice_engine/channel.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/channel.cc
@@ -2560,16 +2560,28 @@ int Channel::SetSendTelephoneEventPayloa
return 0;
}
int Channel::VoiceActivityIndicator(int& activity) {
activity = _sendFrameType;
return 0;
}
+int Channel::SetLocalMID(const char* mid) {
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::SetLocalMID()");
+ if (channel_state_.Get().sending) {
+ _engineStatisticsPtr->SetLastError(VE_ALREADY_SENDING, kTraceError,
+ "SetLocalMID() already sending");
+ return -1;
+ }
+ _rtpRtcpModule->SetMID(mid);
+ return 0;
+}
+
int Channel::SetLocalSSRC(unsigned int ssrc) {
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
"Channel::SetLocalSSRC()");
if (channel_state_.Get().sending) {
_engineStatisticsPtr->SetLastError(VE_ALREADY_SENDING, kTraceError,
"SetLocalSSRC() already sending");
return -1;
}
@@ -2587,16 +2599,20 @@ int Channel::GetRemoteSSRC(unsigned int&
return 0;
}
int Channel::SetSendAudioLevelIndicationStatus(bool enable, unsigned char id) {
_includeAudioLevelIndication = enable;
return SetSendRtpHeaderExtension(enable, kRtpExtensionAudioLevel, id);
}
+int Channel::SetSendMIDStatus(bool enable, unsigned char id) {
+ return SetSendRtpHeaderExtension(enable, kRtpExtensionMId, id);
+}
+
int Channel::SetReceiveAudioLevelIndicationStatus(bool enable,
unsigned char id) {
rtp_header_parser_->DeregisterRtpHeaderExtension(kRtpExtensionAudioLevel);
if (enable &&
!rtp_header_parser_->RegisterRtpHeaderExtension(kRtpExtensionAudioLevel,
id)) {
return -1;
}
--- a/media/webrtc/trunk/webrtc/voice_engine/channel.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/channel.h
@@ -290,20 +290,22 @@ class Channel
// DTMF
int SendTelephoneEventOutband(int event, int duration_ms);
int SetSendTelephoneEventPayloadType(int payload_type, int payload_frequency);
// VoEAudioProcessingImpl
int VoiceActivityIndicator(int& activity);
// VoERTP_RTCP
+ int SetLocalMID(const char* mid);
int SetLocalSSRC(unsigned int ssrc);
int GetLocalSSRC(unsigned int& ssrc);
int GetRemoteSSRC(unsigned int& ssrc);
int SetSendAudioLevelIndicationStatus(bool enable, unsigned char id);
+ int SetSendMIDStatus(bool enable, unsigned char id);
int SetReceiveAudioLevelIndicationStatus(bool enable, unsigned char id);
int SetSendAbsoluteSenderTimeStatus(bool enable, unsigned char id);
int SetReceiveAbsoluteSenderTimeStatus(bool enable, unsigned char id);
void EnableSendTransportSequenceNumber(int id);
void EnableReceiveTransportSequenceNumber(int id);
void RegisterSenderCongestionControlObjects(
RtpPacketSender* rtp_packet_sender,
--- a/media/webrtc/trunk/webrtc/voice_engine/include/voe_rtp_rtcp.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/include/voe_rtp_rtcp.h
@@ -105,30 +105,36 @@ class WEBRTC_DLLEXPORT VoERTP_RTCP {
static VoERTP_RTCP* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoERTP_RTCP sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should
// be zero for all sub-API:s before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
+ // Sets the local RTP MID for the specified |channel|.
+ virtual int SetLocalMID(int channel, const char* mid) = 0;
+
// Sets the local RTP synchronization source identifier (SSRC) explicitly.
virtual int SetLocalSSRC(int channel, unsigned int ssrc) = 0;
// Gets the local RTP SSRC of a specified |channel|.
virtual int GetLocalSSRC(int channel, unsigned int& ssrc) = 0;
// Gets the SSRC of the incoming RTP packets.
virtual int GetRemoteSSRC(int channel, unsigned int& ssrc) = 0;
// Sets the status of rtp-audio-level-indication on a specific |channel|.
virtual int SetSendAudioLevelIndicationStatus(int channel,
bool enable,
unsigned char id = 1) = 0;
+ // Sets the status of sending MID on a specific |channel|.
+ virtual int SetSendMIDStatus(int channel, bool enable, unsigned char id = 1) = 0;
+
// Sets the status of receiving rtp-audio-level-indication on a specific
// |channel|.
virtual int SetReceiveAudioLevelIndicationStatus(int channel,
bool enable,
unsigned char id = 1) {
// TODO(wu): Remove default implementation once talk is updated.
return 0;
}
--- a/media/webrtc/trunk/webrtc/voice_engine/voe_rtp_rtcp_impl.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/voe_rtp_rtcp_impl.cc
@@ -33,16 +33,33 @@ VoERTP_RTCPImpl::VoERTP_RTCPImpl(voe::Sh
"VoERTP_RTCPImpl::VoERTP_RTCPImpl() - ctor");
}
VoERTP_RTCPImpl::~VoERTP_RTCPImpl() {
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
"VoERTP_RTCPImpl::~VoERTP_RTCPImpl() - dtor");
}
+int VoERTP_RTCPImpl::SetLocalMID(int channel, const char* mid) {
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "SetLocalMID(channel=%d, %s)", channel, mid);
+ if (!_shared->statistics().Initialized()) {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+ voe::Channel* channelPtr = ch.channel();
+ if (channelPtr == NULL) {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "SetLocalMID() failed to locate channel");
+ return -1;
+ }
+ return channelPtr->SetLocalMID(mid);
+}
+
int VoERTP_RTCPImpl::SetLocalSSRC(int channel, unsigned int ssrc) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetLocalSSRC(channel=%d, %lu)", channel, ssrc);
if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
@@ -113,16 +130,50 @@ int VoERTP_RTCPImpl::SetSendAudioLevelIn
_shared->SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetSendAudioLevelIndicationStatus() failed to locate channel");
return -1;
}
return channelPtr->SetSendAudioLevelIndicationStatus(enable, id);
}
+int VoERTP_RTCPImpl::SetSendMIDStatus(int channel,
+ bool enable,
+ unsigned char id) {
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "SetSendMIDStatus(channel=%d, enable=%d,"
+ " ID=%u)",
+ channel, enable, id);
+ if (!_shared->statistics().Initialized()) {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ if (enable && (id < kVoiceEngineMinRtpExtensionId ||
+ id > kVoiceEngineMaxRtpExtensionId)) {
+ // [RFC5285] The 4-bit id is the local identifier of this element in
+ // the range 1-14 inclusive.
+ _shared->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "SetSendMIDStatus() invalid ID parameter");
+ return -1;
+ }
+
+ // Set state and id for the specified channel.
+ voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+ voe::Channel* channelPtr = ch.channel();
+ if (channelPtr == NULL) {
+ _shared->SetLastError(
+ VE_CHANNEL_NOT_VALID, kTraceError,
+ "SetSendMIDStatus() failed to locate channel");
+ return -1;
+ }
+ return channelPtr->SetSendMIDStatus(enable, id);
+}
+
+
int VoERTP_RTCPImpl::SetReceiveAudioLevelIndicationStatus(int channel,
bool enable,
unsigned char id) {
WEBRTC_TRACE(
kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetReceiveAudioLevelIndicationStatus(channel=%d, enable=%d, id=%u)",
channel, enable, id);
if (!_shared->statistics().Initialized()) {
--- a/media/webrtc/trunk/webrtc/voice_engine/voe_rtp_rtcp_impl.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/voe_rtp_rtcp_impl.h
@@ -23,27 +23,30 @@ class VoERTP_RTCPImpl : public VoERTP_RT
int SetRTCPStatus(int channel, bool enable) override;
int GetRTCPStatus(int channel, bool& enabled) override;
int SetRTCP_CNAME(int channel, const char cName[256]) override;
int GetRemoteRTCP_CNAME(int channel, char cName[256]) override;
+ int SetLocalMID(int channel, const char* mid) override;
+
// SSRC
int SetLocalSSRC(int channel, unsigned int ssrc) override;
int GetLocalSSRC(int channel, unsigned int& ssrc) override;
int GetRemoteSSRC(int channel, unsigned int& ssrc) override;
// RTP Header Extension for Client-to-Mixer Audio Level Indication
int SetSendAudioLevelIndicationStatus(int channel,
bool enable,
unsigned char id) override;
+ int SetSendMIDStatus(int channel, bool enable, unsigned char id = 1) override;
int SetReceiveAudioLevelIndicationStatus(int channel,
bool enable,
unsigned char id) override;
// Statistics
int GetRTPStatistics(int channel,
unsigned int& averageJitterMs,
unsigned int& maxJitterMs,