Bug 1369967 - Remove one use of a stack-allocated buffer and fix another buffer's size. r?jesup
Two things here:
- The default stack size of the thread pool is not very big, so it's better to
stick the buffer we need on the object.
- There was a unit mismatch between bytes and samples. This changes the name to
make the unit more obvious, and fixes its usage by dividing by the sample size.
MozReview-Commit-ID: 19bbS6iGvTw
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -58,20 +58,20 @@
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "nsThreadUtils.h"
#include "logging.h"
// Max size given stereo is 480*2*2 = 1920 (10ms of 16-bits stereo audio at
// 48KHz)
-#define AUDIO_SAMPLE_BUFFER_MAX 480*2*2
+#define AUDIO_SAMPLE_BUFFER_MAX_BYTES 480*2*2
static_assert((WEBRTC_DEFAULT_SAMPLE_RATE/100)*sizeof(uint16_t) * 2
- <= AUDIO_SAMPLE_BUFFER_MAX,
- "AUDIO_SAMPLE_BUFFER_MAX is not large enough");
+ <= AUDIO_SAMPLE_BUFFER_MAX_BYTES,
+ "AUDIO_SAMPLE_BUFFER_MAX_BYTES is not large enough");
using namespace mozilla;
using namespace mozilla::dom;
using namespace mozilla::gfx;
using namespace mozilla::layers;
// Logging context
MOZ_MTLOG_MODULE("mediapipeline")
@@ -543,22 +543,18 @@ public:
packetizer_ = new AudioPacketizer<int16_t, int16_t>(audio_10ms, outputChannels);
}
packetizer_->Input(samples, chunk.mDuration);
while (packetizer_->PacketsAvailable()) {
uint32_t samplesPerPacket = packetizer_->PacketSize() *
packetizer_->Channels();
- // We know that webrtc.org's code going to copy the samples down the line,
- // so we can just use a stack buffer here instead of malloc-ing.
- int16_t packet[AUDIO_SAMPLE_BUFFER_MAX];
-
- packetizer_->Output(packet);
- mConduit->SendAudioFrame(packet, samplesPerPacket, rate, 0);
+ packetizer_->Output(packet_);
+ mConduit->SendAudioFrame(packet_, samplesPerPacket, rate, 0);
}
}
void QueueAudioChunk(TrackRate rate, AudioChunk& chunk, bool enabled)
{
RUN_ON_THREAD(mThread,
WrapRunnable(RefPtr<AudioProxyThread>(this),
&AudioProxyThread::InternalProcessAudioChunk,
@@ -575,16 +571,18 @@ protected:
NS_ReleaseOnMainThread(mConduit.forget());
MOZ_COUNT_DTOR(AudioProxyThread);
}
RefPtr<AudioSessionConduit> mConduit;
nsCOMPtr<nsIEventTarget> mThread;
// Only accessed on mThread
nsAutoPtr<AudioPacketizer<int16_t, int16_t>> packetizer_;
+ // A buffer to hold a single packet of audio.
+ int16_t packet_[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
};
static char kDTLSExporterLabel[] = "EXTRACTOR-dtls_srtp";
MediaPipeline::MediaPipeline(const std::string& pc,
Direction direction,
nsCOMPtr<nsIEventTarget> main_thread,
nsCOMPtr<nsIEventTarget> sts_thread,
@@ -2033,17 +2031,17 @@ public:
if (!source_) {
MOZ_MTLOG(ML_ERROR, "NotifyPull() called from a non-SourceMediaStream");
return;
}
// This comparison is done in total time to avoid accumulated roundoff errors.
while (source_->TicksToTimeRoundDown(WEBRTC_DEFAULT_SAMPLE_RATE,
played_ticks_) < desired_time) {
- int16_t scratch_buffer[AUDIO_SAMPLE_BUFFER_MAX];
+ int16_t scratch_buffer[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
int samples_length;
// This fetches 10ms of data, either mono or stereo
MediaConduitErrorCode err =
static_cast<AudioSessionConduit*>(conduit_.get())->GetAudioFrame(
scratch_buffer,
WEBRTC_DEFAULT_SAMPLE_RATE,
@@ -2056,17 +2054,17 @@ public:
<< ") to return data @ " << played_ticks_
<< " (desired " << desired_time << " -> "
<< source_->StreamTimeToSeconds(desired_time) << ")");
// if this is not enough we'll loop and provide more
samples_length = WEBRTC_DEFAULT_SAMPLE_RATE/100;
PodArrayZero(scratch_buffer);
}
- MOZ_ASSERT(samples_length * sizeof(uint16_t) < AUDIO_SAMPLE_BUFFER_MAX);
+ MOZ_ASSERT(samples_length * sizeof(uint16_t) < AUDIO_SAMPLE_BUFFER_MAX_BYTES);
MOZ_MTLOG(ML_DEBUG, "Audio conduit returned buffer of length "
<< samples_length);
RefPtr<SharedBuffer> samples = SharedBuffer::Create(samples_length * sizeof(uint16_t));
int16_t *samples_data = static_cast<int16_t *>(samples->Data());
AudioSegment segment;
// We derive the number of channels of the stream from the number of samples