Bug 1397793 - Use the MSG rate in MediaPipeline/PeerConnectionImpl. r?pehrsons
We used to fix the rate, arbitrarily, to 32kHz. Because the graph is almost
never running at 32kHz (more like 44.1kHz or 48kHz), and the codec would often
not be at 32kHz, this meant multiple resampling:
- Once here, in MediaPipeline, to bring to 32kHz
- Once when getting inserted in the MSG (so that the audio was brought back to
MSG rate)
- Maybe once in cubeb (depending on the platform)
This change removes the second resampling entirely: the track is now at the
correct rate, as far as the MSG is concerned.
Additionally, if the MSG is running at 48kHz, further resampling steps are
saved, because 48kHz is one of the native webrtc.org rates.
MozReview-Commit-ID: DBWcwuWxUpu
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -58,17 +58,17 @@
#include "nsThreadUtils.h"
#include "CSFLog.h"
// Max size given stereo is 480*2*2 = 1920 (10ms of 16-bits stereo audio at
// 48KHz)
#define AUDIO_SAMPLE_BUFFER_MAX_BYTES 480*2*2
-static_assert((WEBRTC_DEFAULT_SAMPLE_RATE/100)*sizeof(uint16_t) * 2
+static_assert((WEBRTC_MAX_SAMPLE_RATE/100)*sizeof(uint16_t) * 2
<= AUDIO_SAMPLE_BUFFER_MAX_BYTES,
"AUDIO_SAMPLE_BUFFER_MAX_BYTES is not large enough");
using namespace mozilla;
using namespace mozilla::dom;
using namespace mozilla::gfx;
using namespace mozilla::layers;
@@ -2033,53 +2033,56 @@ public:
void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) override
{
MOZ_ASSERT(source_);
if (!source_) {
CSFLogError(LOGTAG, "NotifyPull() called from a non-SourceMediaStream");
return;
}
+ TrackRate rate = graph->GraphRate();
+ uint32_t samples_per_10ms = rate/100;
+
// This comparison is done in total time to avoid accumulated roundoff errors.
- while (source_->TicksToTimeRoundDown(WEBRTC_DEFAULT_SAMPLE_RATE,
+ while (source_->TicksToTimeRoundDown(rate,
played_ticks_) < desired_time) {
int16_t scratch_buffer[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
int samples_length;
// This fetches 10ms of data, either mono or stereo
MediaConduitErrorCode err =
static_cast<AudioSessionConduit*>(conduit_.get())->GetAudioFrame(
scratch_buffer,
- WEBRTC_DEFAULT_SAMPLE_RATE,
+ rate,
0, // TODO(ekr@rtfm.com): better estimate of "capture" (really playout) delay
samples_length);
if (err != kMediaConduitNoError) {
// Insert silence on conduit/GIPS failure (extremely unlikely)
CSFLogError(LOGTAG, "Audio conduit failed (%d) to return data @ %" PRId64 " (desired %" PRId64 " -> %f)",
err, played_ticks_, desired_time,
source_->StreamTimeToSeconds(desired_time));
// if this is not enough we'll loop and provide more
- samples_length = WEBRTC_DEFAULT_SAMPLE_RATE/100;
+ samples_length = samples_per_10ms;
PodArrayZero(scratch_buffer);
}
- MOZ_ASSERT(samples_length * sizeof(uint16_t) < AUDIO_SAMPLE_BUFFER_MAX_BYTES);
+ MOZ_ASSERT(samples_length * sizeof(uint16_t) <= AUDIO_SAMPLE_BUFFER_MAX_BYTES);
CSFLogDebug(LOGTAG, "Audio conduit returned buffer of length %u",
samples_length);
RefPtr<SharedBuffer> samples = SharedBuffer::Create(samples_length * sizeof(uint16_t));
int16_t *samples_data = static_cast<int16_t *>(samples->Data());
AudioSegment segment;
// We derive the number of channels of the stream from the number of samples
// the AudioConduit gives us, considering it gives us packets of 10ms and we
// know the rate.
- uint32_t channelCount = samples_length / (WEBRTC_DEFAULT_SAMPLE_RATE / 100);
+ uint32_t channelCount = samples_length / samples_per_10ms;
AutoTArray<int16_t*,2> channels;
AutoTArray<const int16_t*,2> outputChannels;
size_t frames = samples_length / channelCount;
channels.SetLength(channelCount);
size_t offset = 0;
for (size_t i = 0; i < channelCount; i++) {
@@ -2096,17 +2099,17 @@ public:
segment.AppendFrames(samples.forget(), outputChannels, frames,
principal_handle_);
// Handle track not actually added yet or removed/finished
if (source_->AppendToTrack(track_id_, &segment)) {
played_ticks_ += frames;
if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
- if (played_ticks_ > last_log_ + WEBRTC_DEFAULT_SAMPLE_RATE) { // ~ 1 second
+ if (played_ticks_ > last_log_ + graph->GraphRate()) { // ~ 1 second
MOZ_LOG(AudioLogModule(), LogLevel::Debug,
("%p: Inserting %zu samples into track %d, total = %" PRIu64,
(void*) this, frames, track_id_, played_ticks_));
last_log_ = played_ticks_;
}
}
} else {
CSFLogError(LOGTAG, "AppendToTrack failed");
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
@@ -23,16 +23,17 @@
#include "StreamTracks.h"
#include "signaling/src/peerconnection/PacketDumper.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
// Should come from MediaEngine.h, but that's a pain to include here
// because of the MOZILLA_EXTERNAL_LINKAGE stuff.
#define WEBRTC_DEFAULT_SAMPLE_RATE 32000
+#define WEBRTC_MAX_SAMPLE_RATE 48000
class nsIPrincipal;
namespace mozilla {
class MediaPipelineFilter;
class PeerIdentity;
class AudioProxyThread;
class VideoFrameConverter;