Bug 1240417. Part 1 - add a writer class to encapsulate pointer arithmetic. r=kinetik.
--- a/dom/media/AudioStream.cpp
+++ b/dom/media/AudioStream.cpp
@@ -548,144 +548,135 @@ AudioStream::Downmix(AudioDataValue* aBu
if (mChannels >= 2 && mIsMonoAudioEnabled) {
DownmixStereoToMono(aBuffer, aFrames);
}
return true;
}
-long
-AudioStream::GetUnprocessed(void* aBuffer, long aFrames)
+void
+AudioStream::GetUnprocessed(AudioBufferWriter& aWriter)
{
mMonitor.AssertCurrentThreadOwns();
- uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);
// Flush the timestretcher pipeline, if we were playing using a playback rate
// other than 1.0.
- uint32_t flushedFrames = 0;
if (mTimeStretcher && mTimeStretcher->numSamples()) {
- flushedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames);
- wpos += FramesToBytes(flushedFrames);
+ auto timeStretcher = mTimeStretcher;
+ aWriter.Write([timeStretcher] (AudioDataValue* aPtr, uint32_t aFrames) {
+ return timeStretcher->receiveSamples(aPtr, aFrames);
+ }, aWriter.Available());
// TODO: There might be still unprocessed samples in the stretcher.
// We should either remove or flush them so they won't be in the output
// next time we switch a playback rate other than 1.0.
NS_WARN_IF(mTimeStretcher->numUnprocessedSamples() > 0);
}
- uint32_t toPopFrames = aFrames - flushedFrames;
- while (toPopFrames > 0) {
- UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
+ while (aWriter.Available() > 0) {
+ UniquePtr<Chunk> c = mDataSource.PopFrames(aWriter.Available());
if (c->Frames() == 0) {
break;
}
- MOZ_ASSERT(c->Frames() <= toPopFrames);
+ MOZ_ASSERT(c->Frames() <= aWriter.Available());
if (Downmix(c->GetWritable(), c->Frames())) {
- memcpy(wpos, c->Data(), FramesToBytes(c->Frames()));
+ aWriter.Write(c->Data(), c->Frames());
} else {
// Write silence if downmixing fails.
- memset(wpos, 0, FramesToBytes(c->Frames()));
+ aWriter.WriteZeros(c->Frames());
}
- wpos += FramesToBytes(c->Frames());
- toPopFrames -= c->Frames();
}
-
- return aFrames - toPopFrames;
}
-long
-AudioStream::GetTimeStretched(void* aBuffer, long aFrames)
+void
+AudioStream::GetTimeStretched(AudioBufferWriter& aWriter)
{
mMonitor.AssertCurrentThreadOwns();
// We need to call the non-locking version, because we already have the lock.
if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
- return 0;
+ return;
}
- uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);
double playbackRate = static_cast<double>(mInRate) / mOutRate;
- uint32_t toPopFrames = ceil(aFrames * playbackRate);
+ uint32_t toPopFrames = ceil(aWriter.Available() * playbackRate);
- while (mTimeStretcher->numSamples() < static_cast<uint32_t>(aFrames)) {
+ while (mTimeStretcher->numSamples() < aWriter.Available()) {
UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
if (c->Frames() == 0) {
break;
}
MOZ_ASSERT(c->Frames() <= toPopFrames);
if (Downmix(c->GetWritable(), c->Frames())) {
mTimeStretcher->putSamples(c->Data(), c->Frames());
} else {
// Write silence if downmixing fails.
nsAutoTArray<AudioDataValue, 1000> buf;
buf.SetLength(mOutChannels * c->Frames());
memset(buf.Elements(), 0, buf.Length() * sizeof(AudioDataValue));
mTimeStretcher->putSamples(buf.Elements(), c->Frames());
}
}
- uint32_t receivedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames);
- wpos += FramesToBytes(receivedFrames);
- return receivedFrames;
+ auto timeStretcher = mTimeStretcher;
+ aWriter.Write([timeStretcher] (AudioDataValue* aPtr, uint32_t aFrames) {
+ return timeStretcher->receiveSamples(aPtr, aFrames);
+ }, aWriter.Available());
}
long
AudioStream::DataCallback(void* aBuffer, long aFrames)
{
MonitorAutoLock mon(mMonitor);
MOZ_ASSERT(mState != SHUTDOWN, "No data callback after shutdown");
- uint32_t underrunFrames = 0;
- uint32_t servicedFrames = 0;
+
+ auto writer = AudioBufferWriter(
+ reinterpret_cast<AudioDataValue*>(aBuffer), mOutChannels, aFrames);
// FIXME: cubeb_pulse sometimes calls us before cubeb_stream_start() is called.
// We don't want to consume audio data until Start() is called by the client.
if (mState == INITIALIZED) {
NS_WARNING("data callback fires before cubeb_stream_start() is called");
mAudioClock.UpdateFrameHistory(0, aFrames);
- memset(aBuffer, 0, FramesToBytes(aFrames));
- return aFrames;
+ return writer.WriteZeros(aFrames);
}
// NOTE: wasapi (others?) can call us back *after* stop()/Shutdown() (mState == SHUTDOWN)
// Bug 996162
// callback tells us cubeb succeeded initializing
if (mState == STARTED) {
mState = RUNNING;
}
if (mInRate == mOutRate) {
- servicedFrames = GetUnprocessed(aBuffer, aFrames);
+ GetUnprocessed(writer);
} else {
- servicedFrames = GetTimeStretched(aBuffer, aFrames);
+ GetTimeStretched(writer);
}
- underrunFrames = aFrames - servicedFrames;
-
// Always send audible frames first, and silent frames later.
// Otherwise it will break the assumption of FrameHistory.
if (!mDataSource.Ended()) {
- mAudioClock.UpdateFrameHistory(servicedFrames, underrunFrames);
- uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
- memset(rpos, 0, FramesToBytes(underrunFrames));
- if (underrunFrames) {
+ mAudioClock.UpdateFrameHistory(aFrames - writer.Available(), writer.Available());
+ if (writer.Available() > 0) {
MOZ_LOG(gAudioStreamLog, LogLevel::Warning,
- ("AudioStream %p lost %d frames", this, underrunFrames));
+ ("AudioStream %p lost %d frames", this, writer.Available()));
+ writer.WriteZeros(writer.Available());
}
- servicedFrames += underrunFrames;
} else {
// No more new data in the data source. Don't send silent frames so the
// cubeb stream can start draining.
- mAudioClock.UpdateFrameHistory(servicedFrames, 0);
+ mAudioClock.UpdateFrameHistory(aFrames - writer.Available(), 0);
}
WriteDumpFile(mDumpFile, this, aFrames, aBuffer);
- return servicedFrames;
+ return aFrames - writer.Available();
}
void
AudioStream::StateCallback(cubeb_state aState)
{
MonitorAutoLock mon(mMonitor);
MOZ_ASSERT(mState != SHUTDOWN, "No state callback after shutdown");
LOG(("AudioStream: StateCallback %p, mState=%d cubeb_state=%d", this, mState, aState));
--- a/dom/media/AudioStream.h
+++ b/dom/media/AudioStream.h
@@ -143,16 +143,76 @@ public:
private:
UniquePtr<uint8_t[]> mBuffer;
uint32_t mCapacity;
uint32_t mStart;
uint32_t mCount;
};
+/*
+ * A bookkeeping class to track the read/write position of an audio buffer.
+ */
+class AudioBufferCursor {
+public:
+ AudioBufferCursor(AudioDataValue* aPtr, uint32_t aChannels, uint32_t aFrames)
+ : mPtr(aPtr), mChannels(aChannels), mFrames(aFrames) {}
+
+ // Advance the cursor to account for frames that are consumed.
+ uint32_t Advance(uint32_t aFrames) {
+ MOZ_ASSERT(mFrames >= aFrames);
+ mFrames -= aFrames;
+ mPtr += mChannels * aFrames;
+ return aFrames;
+ }
+
+ // The number of frames available for read/write in this buffer.
+ uint32_t Available() const { return mFrames; }
+
+ // Return a pointer where read/write should begin.
+ AudioDataValue* Ptr() const { return mPtr; }
+
+protected:
+ AudioDataValue* mPtr;
+ const uint32_t mChannels;
+ uint32_t mFrames;
+};
+
+/*
+ * A helper class to encapsulate pointer arithmetic and provide means to modify
+ * the underlying audio buffer.
+ */
+class AudioBufferWriter : private AudioBufferCursor {
+public:
+ AudioBufferWriter(AudioDataValue* aPtr, uint32_t aChannels, uint32_t aFrames)
+ : AudioBufferCursor(aPtr, aChannels, aFrames) {}
+
+ uint32_t WriteZeros(uint32_t aFrames) {
+ memset(mPtr, 0, sizeof(AudioDataValue) * mChannels * aFrames);
+ return Advance(aFrames);
+ }
+
+ uint32_t Write(const AudioDataValue* aPtr, uint32_t aFrames) {
+ memcpy(mPtr, aPtr, sizeof(AudioDataValue) * mChannels * aFrames);
+ return Advance(aFrames);
+ }
+
+ // Provide a write function to update the audio buffer with the following
+ // signature: uint32_t(AudioDataValue* aPtr, uint32_t aFrames)
+ // aPtr: Pointer to the audio buffer.
+ // aFrames: The number of frames available in the buffer.
+ // return: The number of frames actually written by the function.
+ template <typename Function>
+ uint32_t Write(const Function& aFunction, uint32_t aFrames) {
+ return Advance(aFunction(mPtr, aFrames));
+ }
+
+ using AudioBufferCursor::Available;
+};
+
// Access to a single instance of this class must be synchronized by
// callers, or made from a single thread. One exception is that access to
// GetPosition, GetPositionInFrames, SetVolume, and Get{Rate,Channels},
// SetMicrophoneActive is thread-safe without external synchronization.
class AudioStream final
{
virtual ~AudioStream();
@@ -258,18 +318,18 @@ private:
long DataCallback(void* aBuffer, long aFrames);
void StateCallback(cubeb_state aState);
nsresult EnsureTimeStretcherInitializedUnlocked();
// Return true if downmixing succeeds otherwise false.
bool Downmix(AudioDataValue* aBuffer, uint32_t aFrames);
- long GetUnprocessed(void* aBuffer, long aFrames);
- long GetTimeStretched(void* aBuffer, long aFrames);
+ void GetUnprocessed(AudioBufferWriter& aWriter);
+ void GetTimeStretched(AudioBufferWriter& aWriter);
void StartUnlocked();
// The monitor is held to protect all access to member variables.
Monitor mMonitor;
// Input rate in Hz (characteristic of the media being played)
int mInRate;
--- a/dom/media/mediasink/DecodedAudioDataSink.cpp
+++ b/dom/media/mediasink/DecodedAudioDataSink.cpp
@@ -162,27 +162,22 @@ DecodedAudioDataSink::GetEndTime() const
return playedUsecs.value();
}
UniquePtr<AudioStream::Chunk>
DecodedAudioDataSink::PopFrames(uint32_t aFrames)
{
class Chunk : public AudioStream::Chunk {
public:
- Chunk(AudioData* aBuffer, uint32_t aFrames, uint32_t aOffset)
- : mBuffer(aBuffer)
- , mFrames(aFrames)
- , mData(aBuffer->mAudioData.get() + aBuffer->mChannels * aOffset) {
- MOZ_ASSERT(aOffset + aFrames <= aBuffer->mFrames);
- }
+ Chunk(AudioData* aBuffer, uint32_t aFrames, AudioDataValue* aData)
+ : mBuffer(aBuffer), mFrames(aFrames), mData(aData) {}
Chunk() : mFrames(0), mData(nullptr) {}
const AudioDataValue* Data() const { return mData; }
uint32_t Frames() const { return mFrames; }
AudioDataValue* GetWritable() const { return mData; }
-
private:
const RefPtr<AudioData> mBuffer;
const uint32_t mFrames;
AudioDataValue* const mData;
};
class SilentChunk : public AudioStream::Chunk {
public:
@@ -227,42 +222,44 @@ DecodedAudioDataSink::PopFrames(uint32_t
// hardware so that the next audio chunk begins playback at the correct
// time.
missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
auto framesToPop = std::min<uint32_t>(missingFrames.value(), aFrames);
mWritten += framesToPop;
return MakeUnique<SilentChunk>(framesToPop, mInfo.mChannels);
}
- mFramesPopped = 0;
mCurrentData = dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
+ mCursor = MakeUnique<AudioBufferCursor>(mCurrentData->mAudioData.get(),
+ mCurrentData->mChannels,
+ mCurrentData->mFrames);
}
- auto framesToPop = std::min(aFrames, mCurrentData->mFrames - mFramesPopped);
+ auto framesToPop = std::min(aFrames, mCursor->Available());
SINK_LOG_V("playing audio at time=%lld offset=%u length=%u",
- mCurrentData->mTime, mFramesPopped, framesToPop);
+ mCurrentData->mTime, mCurrentData->mFrames - mCursor->Available(), framesToPop);
UniquePtr<AudioStream::Chunk> chunk;
if (mCurrentData->mRate == mInfo.mRate &&
mCurrentData->mChannels == mInfo.mChannels) {
- chunk = MakeUnique<Chunk>(mCurrentData, framesToPop, mFramesPopped);
+ chunk = MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());
} else {
SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]",
mInfo.mRate, mInfo.mChannels, mCurrentData->mRate, mCurrentData->mChannels);
chunk = MakeUnique<SilentChunk>(framesToPop, mInfo.mChannels);
}
mWritten += framesToPop;
- mFramesPopped += framesToPop;
+ mCursor->Advance(framesToPop);
// All frames are popped. Reset mCurrentData so we can pop new elements from
// the audio queue in next calls to PopFrames().
- if (mFramesPopped == mCurrentData->mFrames) {
+ if (mCursor->Available() == 0) {
mCurrentData = nullptr;
}
return chunk;
}
bool
DecodedAudioDataSink::Ended() const
--- a/dom/media/mediasink/DecodedAudioDataSink.h
+++ b/dom/media/mediasink/DecodedAudioDataSink.h
@@ -92,18 +92,18 @@ private:
MozPromiseHolder<GenericPromise> mEndPromise;
/*
* Members to implement AudioStream::DataSource.
* Used on the callback thread of cubeb.
*/
// The AudioData at which AudioStream::DataSource is reading.
RefPtr<AudioData> mCurrentData;
- // The number of frames that have been popped from mCurrentData.
- uint32_t mFramesPopped = 0;
+ // Keep track of the read position of mCurrentData.
+ UniquePtr<AudioBufferCursor> mCursor;
// True if there is any error in processing audio data like overflow.
bool mErrored = false;
};
} // namespace media
} // namespace mozilla
#endif