Bug 948267. Part 2 - implement AudioStream::DataSource for DecodedAudioDataSink and remove its audio thread. r=kinetik.
author JW Wang <jwwang@mozilla.com>
Mon, 11 Jan 2016 06:47:02 +0800
changeset 320365 6bb42d612a715135de5a78fa7bdca652e7826242
parent 320364 861179f6bbd5eeb591c3be04c54a17422a627686
child 320366 7ce5d6464425f5dccb5492853a5cf5221bc7ea74
push id 9178
push user jwwang@mozilla.com
push date Sun, 10 Jan 2016 23:51:49 +0000
reviewers kinetik
bugs 948267
milestone 46.0a1
Bug 948267. Part 2 - implement AudioStream::DataSource for DecodedAudioDataSink and remove its audio thread. r=kinetik.
dom/media/mediasink/AudioSink.h
dom/media/mediasink/AudioSinkWrapper.cpp
dom/media/mediasink/DecodedAudioDataSink.cpp
dom/media/mediasink/DecodedAudioDataSink.h
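
Editor's note: this patch switches DecodedAudioDataSink from a push model (a dedicated "Media Audio" thread writing into the AudioStream) to a pull model in which the AudioStream's cubeb data callback pulls audio through AudioStream::DataSource. The real DataSource declaration lives in AudioStream.h (added in Part 1 of this bug) and is not part of this diff; the standalone C++ sketch below only mirrors the three overrides that DecodedAudioDataSink.h gains further down, so names and signatures are an approximation, not the actual Gecko API.

    // Standalone approximation of the pull-model interface implemented below.
    // Only PopFrames/Ended/Drained are taken from this patch; everything else
    // (including the sample type) is a placeholder.
    #include <cstdint>
    #include <memory>

    using AudioDataValue = float;  // Gecko selects the real sample type at build time.

    class Chunk {
    public:
      virtual const AudioDataValue* Data() const = 0;  // interleaved samples
      virtual uint32_t Frames() const = 0;             // frames in this chunk
      virtual ~Chunk() = default;
    };

    class DataSource {
    public:
      // Called from the audio callback: return at most aFrames frames of audio.
      virtual std::unique_ptr<Chunk> PopFrames(uint32_t aFrames) = 0;
      // True once no further frames will ever be produced.
      virtual bool Ended() const = 0;
      // Notification that everything handed out has finished playing.
      virtual void Drained() = 0;
      virtual ~DataSource() = default;
    };
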
--- a/dom/media/mediasink/AudioSink.h
+++ b/dom/media/mediasink/AudioSink.h
@@ -5,16 +5,18 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #if !defined(AudioSink_h__)
 #define AudioSink_h__
 
 #include "mozilla/MozPromise.h"
 #include "mozilla/RefPtr.h"
 #include "nsISupportsImpl.h"
 
+#include "MediaSink.h"
+
 namespace mozilla {
 
 class MediaData;
 template <class T> class MediaQueue;
 
 namespace media {
 
 /*
@@ -23,19 +25,21 @@ namespace media {
  */
 class AudioSink {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioSink)
   AudioSink(MediaQueue<MediaData>& aAudioQueue)
     : mAudioQueue(aAudioQueue)
   {}
 
+  typedef MediaSink::PlaybackParams PlaybackParams;
+
   // Return a promise which will be resolved when AudioSink finishes playing,
   // or rejected if any error.
-  virtual RefPtr<GenericPromise> Init() = 0;
+  virtual RefPtr<GenericPromise> Init(const PlaybackParams& aParams) = 0;
 
   virtual int64_t GetEndTime() const = 0;
   virtual int64_t GetPosition() = 0;
 
   // Check whether we've pushed more frames to the audio
   // hardware than it has played.
   virtual bool HasUnplayedFrames() = 0;
 
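
Editor's note: Init() now receives the initial playback parameters so the concrete sink can apply them before the stream starts (see InitializeAudioStream() below), instead of the wrapper calling SetPlaybackParams() after the fact. The struct sketched here is only inferred from the three fields this patch reads; the real MediaSink::PlaybackParams may carry additional members, and the defaults are guesses.

    // Assumed shape of MediaSink::PlaybackParams, reconstructed from its use in
    // this patch (volume, playback rate, pitch preservation).
    struct PlaybackParams {
      double mVolume = 1.0;
      double mPlaybackRate = 1.0;
      bool mPreservesPitch = true;
    };
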
--- a/dom/media/mediasink/AudioSinkWrapper.cpp
+++ b/dom/media/mediasink/AudioSinkWrapper.cpp
@@ -183,18 +183,17 @@ AudioSinkWrapper::Start(int64_t aStartTi
   mPlayDuration = aStartTime;
   mPlayStartTime = TimeStamp::Now();
 
   // no audio is equivalent to audio ended before video starts.
   mAudioEnded = !aInfo.HasAudio();
 
   if (aInfo.HasAudio()) {
     mAudioSink = mCreator->Create();
-    mEndPromise = mAudioSink->Init();
-    SetPlaybackParams(mParams);
+    mEndPromise = mAudioSink->Init(mParams);
 
     mAudioSinkPromise.Begin(mEndPromise->Then(
       mOwnerThread.get(), __func__, this,
       &AudioSinkWrapper::OnAudioEnded,
       &AudioSinkWrapper::OnAudioEnded));
   }
 }
 
--- a/dom/media/mediasink/DecodedAudioDataSink.cpp
+++ b/dom/media/mediasink/DecodedAudioDataSink.cpp
@@ -1,15 +1,14 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include "AudioStream.h"
 #include "MediaQueue.h"
 #include "DecodedAudioDataSink.h"
 #include "VideoUtils.h"
 
 #include "mozilla/CheckedInt.h"
 #include "mozilla/DebugOnly.h"
 
 namespace mozilla {
@@ -27,124 +26,43 @@ namespace media {
 // The amount of audio frames that is used to fuzz rounding errors.
 static const int64_t AUDIO_FUZZ_FRAMES = 1;
 
 DecodedAudioDataSink::DecodedAudioDataSink(MediaQueue<MediaData>& aAudioQueue,
                                            int64_t aStartTime,
                                            const AudioInfo& aInfo,
                                            dom::AudioChannel aChannel)
   : AudioSink(aAudioQueue)
-  , mMonitor("DecodedAudioDataSink::mMonitor")
-  , mState(AUDIOSINK_STATE_INIT)
-  , mAudioLoopScheduled(false)
   , mStartTime(aStartTime)
   , mWritten(0)
   , mLastGoodPosition(0)
   , mInfo(aInfo)
   , mChannel(aChannel)
-  , mStopAudioThread(false)
   , mPlaying(true)
 {
 }
 
 DecodedAudioDataSink::~DecodedAudioDataSink()
 {
 }
 
-void
-DecodedAudioDataSink::SetState(State aState)
-{
-  AssertOnAudioThread();
-  mPendingState = Some(aState);
-}
-
-void
-DecodedAudioDataSink::DispatchTask(already_AddRefed<nsIRunnable>&& event)
-{
-  DebugOnly<nsresult> rv = mThread->Dispatch(Move(event), NS_DISPATCH_NORMAL);
-  // There isn't much we can do if Dispatch() fails.
-  // Just assert it to keep things simple.
-  MOZ_ASSERT(NS_SUCCEEDED(rv));
-}
-
-void
-DecodedAudioDataSink::OnAudioQueueEvent()
-{
-  AssertOnAudioThread();
-  if (!mAudioLoopScheduled) {
-    AudioLoop();
-  }
-}
-
-void
-DecodedAudioDataSink::ConnectListener()
-{
-  AssertOnAudioThread();
-  mPushListener = AudioQueue().PushEvent().Connect(
-    mThread, this, &DecodedAudioDataSink::OnAudioQueueEvent);
-  mFinishListener = AudioQueue().FinishEvent().Connect(
-    mThread, this, &DecodedAudioDataSink::OnAudioQueueEvent);
-}
-
-void
-DecodedAudioDataSink::DisconnectListener()
-{
-  AssertOnAudioThread();
-  mPushListener.Disconnect();
-  mFinishListener.Disconnect();
-}
-
-void
-DecodedAudioDataSink::ScheduleNextLoop()
-{
-  AssertOnAudioThread();
-  if (mAudioLoopScheduled) {
-    return;
-  }
-  mAudioLoopScheduled = true;
-  nsCOMPtr<nsIRunnable> r = NS_NewRunnableMethod(this, &DecodedAudioDataSink::AudioLoop);
-  DispatchTask(r.forget());
-}
-
-void
-DecodedAudioDataSink::ScheduleNextLoopCrossThread()
-{
-  AssertNotOnAudioThread();
-  RefPtr<DecodedAudioDataSink> self = this;
-  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self] () {
-    // Do nothing if there is already a pending task waiting for its turn.
-    if (!self->mAudioLoopScheduled) {
-      self->AudioLoop();
-    }
-  });
-  DispatchTask(r.forget());
-}
-
 RefPtr<GenericPromise>
-DecodedAudioDataSink::Init()
+DecodedAudioDataSink::Init(const PlaybackParams& aParams)
 {
   RefPtr<GenericPromise> p = mEndPromise.Ensure(__func__);
-  nsresult rv = NS_NewNamedThread("Media Audio",
-                                  getter_AddRefs(mThread),
-                                  nullptr,
-                                  SharedThreadPool::kStackSize);
+  nsresult rv = InitializeAudioStream(aParams);
   if (NS_FAILED(rv)) {
     mEndPromise.Reject(rv, __func__);
-    return p;
   }
-
-  ScheduleNextLoopCrossThread();
   return p;
 }
 
 int64_t
 DecodedAudioDataSink::GetPosition()
 {
-  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
-
   int64_t pos;
   if (mAudioStream &&
       (pos = mAudioStream->GetPosition()) >= 0) {
     NS_ASSERTION(pos >= mLastGoodPosition,
                  "AudioStream position shouldn't go backward");
     // Update the last good position when we got a good one.
     if (pos >= mLastGoodPosition) {
       mLastGoodPosition = pos;
@@ -152,386 +70,211 @@ DecodedAudioDataSink::GetPosition()
   }
 
   return mStartTime + mLastGoodPosition;
 }
 
 bool
 DecodedAudioDataSink::HasUnplayedFrames()
 {
-  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
   // Experimentation suggests that GetPositionInFrames() is zero-indexed,
   // so we need to add 1 here before comparing it to mWritten.
   return mAudioStream && mAudioStream->GetPositionInFrames() + 1 < mWritten;
 }
 
 void
 DecodedAudioDataSink::Shutdown()
 {
-  {
-    ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
-    if (mAudioStream) {
-      mAudioStream->Cancel();
-    }
-  }
-  RefPtr<DecodedAudioDataSink> self = this;
-  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([=] () {
-    self->mStopAudioThread = true;
-    if (!self->mAudioLoopScheduled) {
-      self->AudioLoop();
-    }
-  });
-  DispatchTask(r.forget());
-
-  mThread->Shutdown();
-  mThread = nullptr;
   if (mAudioStream) {
     mAudioStream->Shutdown();
     mAudioStream = nullptr;
   }
-
-  // Should've reached the final state after shutdown.
-  MOZ_ASSERT(mState == AUDIOSINK_STATE_SHUTDOWN ||
-             mState == AUDIOSINK_STATE_ERROR);
-  // Should have no pending state change.
-  MOZ_ASSERT(mPendingState.isNothing());
+  mEndPromise.ResolveIfExists(true, __func__);
 }
 
 void
 DecodedAudioDataSink::SetVolume(double aVolume)
 {
-  AssertNotOnAudioThread();
-  RefPtr<DecodedAudioDataSink> self = this;
-  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([=] () {
-    if (self->mState == AUDIOSINK_STATE_PLAYING) {
-      self->mAudioStream->SetVolume(aVolume);
-    }
-  });
-  DispatchTask(r.forget());
+  if (mAudioStream) {
+    mAudioStream->SetVolume(aVolume);
+  }
 }
 
 void
 DecodedAudioDataSink::SetPlaybackRate(double aPlaybackRate)
 {
-  AssertNotOnAudioThread();
   MOZ_ASSERT(aPlaybackRate != 0, "Don't set the playbackRate to 0 on AudioStream");
-  RefPtr<DecodedAudioDataSink> self = this;
-  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([=] () {
-    if (self->mState == AUDIOSINK_STATE_PLAYING) {
-      self->mAudioStream->SetPlaybackRate(aPlaybackRate);
-    }
-  });
-  DispatchTask(r.forget());
+  if (mAudioStream) {
+    mAudioStream->SetPlaybackRate(aPlaybackRate);
+  }
 }
 
 void
 DecodedAudioDataSink::SetPreservesPitch(bool aPreservesPitch)
 {
-  AssertNotOnAudioThread();
-  RefPtr<DecodedAudioDataSink> self = this;
-  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([=] () {
-    if (self->mState == AUDIOSINK_STATE_PLAYING) {
-      self->mAudioStream->SetPreservesPitch(aPreservesPitch);
-    }
-  });
-  DispatchTask(r.forget());
+  if (mAudioStream) {
+    mAudioStream->SetPreservesPitch(aPreservesPitch);
+  }
 }
 
 void
 DecodedAudioDataSink::SetPlaying(bool aPlaying)
 {
-  AssertNotOnAudioThread();
-  RefPtr<DecodedAudioDataSink> self = this;
-  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([=] () {
-    if (self->mState != AUDIOSINK_STATE_PLAYING ||
-        self->mPlaying == aPlaying) {
-      return;
-    }
-    self->mPlaying = aPlaying;
-    // pause/resume AudioStream as necessary.
-    if (!aPlaying && !self->mAudioStream->IsPaused()) {
-      self->mAudioStream->Pause();
-    } else if (aPlaying && self->mAudioStream->IsPaused()) {
-      self->mAudioStream->Resume();
-    }
-    // Wake up the audio loop to play next sample.
-    if (aPlaying && !self->mAudioLoopScheduled) {
-      self->AudioLoop();
-    }
-  });
-  DispatchTask(r.forget());
+  if (!mAudioStream || mPlaying == aPlaying) {
+    return;
+  }
+  // pause/resume AudioStream as necessary.
+  if (!aPlaying && !mAudioStream->IsPaused()) {
+    mAudioStream->Pause();
+  } else if (aPlaying && mAudioStream->IsPaused()) {
+    mAudioStream->Resume();
+  }
+  mPlaying = aPlaying;
 }
 
 nsresult
-DecodedAudioDataSink::InitializeAudioStream()
+DecodedAudioDataSink::InitializeAudioStream(const PlaybackParams& aParams)
 {
-  // AudioStream initialization can block for extended periods in unusual
-  // circumstances, so we take care to drop the decoder monitor while
-  // initializing.
-  RefPtr<AudioStream> audioStream(new AudioStream());
+  RefPtr<AudioStream> audioStream(new AudioStream(*this));
   nsresult rv = audioStream->Init(mInfo.mChannels, mInfo.mRate, mChannel);
   if (NS_FAILED(rv)) {
     audioStream->Shutdown();
     return rv;
   }
 
-  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
-  mAudioStream = audioStream;
-
-  return NS_OK;
-}
-
-void
-DecodedAudioDataSink::Drain()
-{
-  AssertOnAudioThread();
-  MOZ_ASSERT(mPlaying && !mAudioStream->IsPaused());
-  // If the media was too short to trigger the start of the audio stream,
-  // start it now.
-  mAudioStream->Start();
-  mAudioStream->Drain();
-}
-
-void
-DecodedAudioDataSink::Cleanup()
-{
-  AssertOnAudioThread();
-  mEndPromise.Resolve(true, __func__);
-  // Since the promise if resolved asynchronously, we don't shutdown
-  // AudioStream here so MDSM::ResyncAudioClock can get the correct
-  // audio position.
-}
-
-bool
-DecodedAudioDataSink::ExpectMoreAudioData()
-{
-  return AudioQueue().GetSize() == 0 && !AudioQueue().IsFinished();
-}
-
-bool
-DecodedAudioDataSink::WaitingForAudioToPlay()
-{
-  AssertOnAudioThread();
-  // Return true if we're not playing, and we're not shutting down, or we're
-  // playing and we've got no audio to play.
-  if (!mStopAudioThread && (!mPlaying || ExpectMoreAudioData())) {
-    return true;
-  }
-  return false;
-}
-
-bool
-DecodedAudioDataSink::IsPlaybackContinuing()
-{
-  AssertOnAudioThread();
-  // If we're shutting down, captured, or at EOS, break out and exit the audio
-  // thread.
-  if (mStopAudioThread || AudioQueue().AtEndOfStream()) {
-    return false;
-  }
-
-  return true;
-}
-
-void
-DecodedAudioDataSink::AudioLoop()
-{
-  AssertOnAudioThread();
-  mAudioLoopScheduled = false;
-
-  switch (mState) {
-    case AUDIOSINK_STATE_INIT: {
-      SINK_LOG("AudioLoop started");
-      nsresult rv = InitializeAudioStream();
-      if (NS_FAILED(rv)) {
-        NS_WARNING("Initializing AudioStream failed.");
-        mEndPromise.Reject(rv, __func__);
-        SetState(AUDIOSINK_STATE_ERROR);
-        break;
-      }
-      SetState(AUDIOSINK_STATE_PLAYING);
-      ConnectListener();
-      break;
-    }
-
-    case AUDIOSINK_STATE_PLAYING: {
-      if (WaitingForAudioToPlay()) {
-        // OnAudioQueueEvent() will schedule next loop.
-        break;
-      }
-      if (!IsPlaybackContinuing()) {
-        SetState(AUDIOSINK_STATE_COMPLETE);
-        break;
-      }
-      if (!PlayAudio()) {
-        SetState(AUDIOSINK_STATE_COMPLETE);
-        break;
-      }
-      // Schedule next loop to play next sample.
-      ScheduleNextLoop();
-      break;
-    }
-
-    case AUDIOSINK_STATE_COMPLETE: {
-      DisconnectListener();
-      FinishAudioLoop();
-      SetState(AUDIOSINK_STATE_SHUTDOWN);
-      break;
-    }
-
-    case AUDIOSINK_STATE_SHUTDOWN:
-      break;
-
-    case AUDIOSINK_STATE_ERROR:
-      break;
-  } // end of switch
+  // Set playback params before calling Start() so they can take effect
+  // as soon as the 1st DataCallback of the AudioStream fires.
+  audioStream->SetVolume(aParams.mVolume);
+  audioStream->SetPlaybackRate(aParams.mPlaybackRate);
+  audioStream->SetPreservesPitch(aParams.mPreservesPitch);
+  audioStream->Start();
 
-  // We want mState to stay stable during AudioLoop to keep things simple.
-  // Therefore, we only do state transition at the end of AudioLoop.
-  if (mPendingState.isSome()) {
-    MOZ_ASSERT(mState != mPendingState.ref());
-    SINK_LOG("change mState, %d -> %d", mState, mPendingState.ref());
-    mState = mPendingState.ref();
-    mPendingState.reset();
-    // Schedule next loop when state changes.
-    ScheduleNextLoop();
-  }
-}
-
-bool
-DecodedAudioDataSink::PlayAudio()
-{
-  // See if there's a gap in the audio. If there is, push silence into the
-  // audio hardware, so we can play across the gap.
-  // Calculate the timestamp of the next chunk of audio in numbers of
-  // samples.
-  NS_ASSERTION(AudioQueue().GetSize() > 0, "Should have data to play");
-  CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);
-
-  // Calculate the number of frames that have been pushed onto the audio hardware.
-  CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) +
-                              static_cast<int64_t>(mWritten);
-
-  CheckedInt64 missingFrames = sampleTime - playedFrames;
-  if (!missingFrames.isValid() || !sampleTime.isValid()) {
-    NS_WARNING("Int overflow adding in AudioLoop");
-    return false;
-  }
-
-  if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
-    // The next audio chunk begins some time after the end of the last chunk
-    // we pushed to the audio hardware. We must push silence into the audio
-    // hardware so that the next audio chunk begins playback at the correct
-    // time.
-    missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
-    mWritten += PlaySilence(static_cast<uint32_t>(missingFrames.value()));
-  } else {
-    mWritten += PlayFromAudioQueue();
-  }
-
-  return true;
-}
-
-void
-DecodedAudioDataSink::FinishAudioLoop()
-{
-  AssertOnAudioThread();
-  MOZ_ASSERT(mStopAudioThread || AudioQueue().AtEndOfStream());
-  if (!mStopAudioThread && mPlaying) {
-    Drain();
-  }
-  SINK_LOG("AudioLoop complete");
-  Cleanup();
-  SINK_LOG("AudioLoop exit");
-}
-
-uint32_t
-DecodedAudioDataSink::PlaySilence(uint32_t aFrames)
-{
-  // Maximum number of bytes we'll allocate and write at once to the audio
-  // hardware when the audio stream contains missing frames and we're
-  // writing silence in order to fill the gap. We limit our silence-writes
-  // to 32KB in order to avoid allocating an impossibly large chunk of
-  // memory if we encounter a large chunk of silence.
-  const uint32_t SILENCE_BYTES_CHUNK = 32 * 1024;
-
-  AssertOnAudioThread();
-  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
-  uint32_t maxFrames = SILENCE_BYTES_CHUNK / mInfo.mChannels / sizeof(AudioDataValue);
-  uint32_t frames = std::min(aFrames, maxFrames);
-  SINK_LOG_V("playing %u frames of silence", aFrames);
-  WriteSilence(frames);
-  return frames;
-}
-
-uint32_t
-DecodedAudioDataSink::PlayFromAudioQueue()
-{
-  AssertOnAudioThread();
-  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
-  RefPtr<AudioData> audio =
-    dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
-
-  SINK_LOG_V("playing %u frames of audio at time %lld",
-             audio->mFrames, audio->mTime);
-  if (audio->mRate == mInfo.mRate && audio->mChannels == mInfo.mChannels) {
-    mAudioStream->Write(audio->mAudioData.get(), audio->mFrames);
-  } else {
-    SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]",
-               mInfo.mRate, mInfo.mChannels, audio->mRate, audio->mChannels);
-    PlaySilence(audio->mFrames);
-  }
-
-  StartAudioStreamPlaybackIfNeeded();
-
-  return audio->mFrames;
-}
-
-void
-DecodedAudioDataSink::StartAudioStreamPlaybackIfNeeded()
-{
-  // This value has been chosen empirically.
-  const uint32_t MIN_WRITE_BEFORE_START_USECS = 200000;
-
-  // We want to have enough data in the buffer to start the stream.
-  if (static_cast<double>(mAudioStream->GetWritten()) / mAudioStream->GetRate() >=
-      static_cast<double>(MIN_WRITE_BEFORE_START_USECS) / USECS_PER_S) {
-    mAudioStream->Start();
-  }
-}
-
-void
-DecodedAudioDataSink::WriteSilence(uint32_t aFrames)
-{
-  uint32_t numSamples = aFrames * mInfo.mChannels;
-  nsAutoTArray<AudioDataValue, 1000> buf;
-  buf.SetLength(numSamples);
-  memset(buf.Elements(), 0, numSamples * sizeof(AudioDataValue));
-  mAudioStream->Write(buf.Elements(), aFrames);
-
-  StartAudioStreamPlaybackIfNeeded();
+  mAudioStream = audioStream;
+  return NS_OK;
 }
 
 int64_t
 DecodedAudioDataSink::GetEndTime() const
 {
   CheckedInt64 playedUsecs = FramesToUsecs(mWritten, mInfo.mRate) + mStartTime;
   if (!playedUsecs.isValid()) {
     NS_WARNING("Int overflow calculating audio end time");
     return -1;
   }
   return playedUsecs.value();
 }
 
-void
-DecodedAudioDataSink::AssertOnAudioThread()
+UniquePtr<AudioStream::Chunk>
+DecodedAudioDataSink::PopFrames(uint32_t aFrames)
 {
-  MOZ_ASSERT(NS_GetCurrentThread() == mThread);
+  class Chunk : public AudioStream::Chunk {
+  public:
+    Chunk(AudioData* aBuffer, uint32_t aFrames, uint32_t aOffset)
+      : mBuffer(aBuffer)
+      , mFrames(aFrames)
+      , mData(aBuffer->mAudioData.get() + aBuffer->mChannels * aOffset) {}
+    Chunk() : mFrames(0), mData(nullptr) {}
+    const AudioDataValue* Data() const { return mData; }
+    uint32_t Frames() const { return mFrames; }
+    AudioDataValue* GetWritable() const { return mData; }
+
+  private:
+    const RefPtr<AudioData> mBuffer;
+    const uint32_t mFrames;
+    AudioDataValue* const mData;
+  };
+
+  class SilentChunk : public AudioStream::Chunk {
+  public:
+    SilentChunk(uint32_t aFrames, uint32_t aChannels)
+      : mFrames(aFrames)
+      , mData(MakeUnique<AudioDataValue[]>(aChannels * aFrames)) {
+      memset(mData.get(), 0, aChannels * aFrames * sizeof(AudioDataValue));
+    }
+    const AudioDataValue* Data() const { return mData.get(); }
+    uint32_t Frames() const { return mFrames; }
+    AudioDataValue* GetWritable() const { return mData.get(); }
+  private:
+    const uint32_t mFrames;
+    UniquePtr<AudioDataValue[]> mData;
+  };
+
+  if (!mCurrentData) {
+    // No data in the queue. Return an empty chunk.
+    if (AudioQueue().GetSize() == 0) {
+      return MakeUnique<Chunk>();
+    }
+
+    // See if there's a gap in the audio. If there is, push silence into the
+    // audio hardware, so we can play across the gap.
+    // Calculate the timestamp of the next chunk of audio in numbers of
+    // samples.
+    CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);
+    // Calculate the number of frames that have been pushed onto the audio hardware.
+    CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) +
+                                static_cast<int64_t>(mWritten);
+    CheckedInt64 missingFrames = sampleTime - playedFrames;
+
+    if (!missingFrames.isValid() || !sampleTime.isValid()) {
+      NS_WARNING("Int overflow in DecodedAudioDataSink");
+      mErrored = true;
+      return MakeUnique<Chunk>();
+    }
+
+    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
+      // The next audio chunk begins some time after the end of the last chunk
+      // we pushed to the audio hardware. We must push silence into the audio
+      // hardware so that the next audio chunk begins playback at the correct
+      // time.
+      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
+      auto framesToPop = std::min<uint32_t>(missingFrames.value(), aFrames);
+      mWritten += framesToPop;
+      return MakeUnique<SilentChunk>(framesToPop, mInfo.mChannels);
+    }
+
+    mFramesPopped = 0;
+    mCurrentData = dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
+  }
+
+  auto framesToPop = std::min(aFrames, mCurrentData->mFrames - mFramesPopped);
+
+  SINK_LOG_V("playing audio at time=%lld offset=%u length=%u",
+             mCurrentData->mTime, mFramesPopped, framesToPop);
+
+  UniquePtr<AudioStream::Chunk> rv;
+
+  if (mCurrentData->mRate == mInfo.mRate &&
+      mCurrentData->mChannels == mInfo.mChannels) {
+    rv = MakeUnique<Chunk>(mCurrentData, framesToPop, mFramesPopped);
+  } else {
+    SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]",
+               mInfo.mRate, mInfo.mChannels, mCurrentData->mRate, mCurrentData->mChannels);
+    rv = MakeUnique<SilentChunk>(framesToPop, mInfo.mChannels);
+  }
+
+  mWritten += framesToPop;
+  mFramesPopped += framesToPop;
+
+  // All frames have been popped. Reset mCurrentData so that subsequent calls
+  // to PopFrames() will pop new elements from the audio queue.
+  if (mFramesPopped == mCurrentData->mFrames) {
+    mCurrentData = nullptr;
+  }
+
+  return rv;
+}
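+
+Editor's note: the gap check at the top of PopFrames() keeps the same arithmetic the old PlayAudio() used: convert the next chunk's timestamp and the frames already written into a common frame count, and treat any difference above AUDIO_FUZZ_FRAMES as missing audio to be covered with silence. The small self-contained example below uses made-up numbers (44.1 kHz, one second already written, next chunk starting at 1.2 s); UsecsToFramesApprox() stands in for the CheckedInt64-based UsecsToFrames() helper.
+
+    // Worked example of the missing-frames computation in PopFrames().
+    #include <cstdint>
+    #include <cstdio>
+
+    static int64_t UsecsToFramesApprox(int64_t aUsecs, uint32_t aRate) {
+      return aUsecs * aRate / 1000000;  // real code uses CheckedInt64 to catch overflow
+    }
+
+    int main() {
+      const uint32_t rate = 44100;
+      const int64_t startTimeUs = 0;            // mStartTime
+      const int64_t written = 44100;            // mWritten: 1 s already handed out
+      const int64_t nextChunkTimeUs = 1200000;  // next AudioData starts at 1.2 s
+
+      int64_t sampleTime = UsecsToFramesApprox(nextChunkTimeUs, rate);          // 52920
+      int64_t playedFrames = UsecsToFramesApprox(startTimeUs, rate) + written;  // 44100
+      int64_t missingFrames = sampleTime - playedFrames;                        // 8820
+
+      // 8820 > AUDIO_FUZZ_FRAMES, so PopFrames() would return a SilentChunk of
+      // min(missingFrames, aFrames) frames and bump mWritten accordingly.
+      printf("missing frames: %lld\n", (long long)missingFrames);
+      return 0;
+    }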
+
+bool
+DecodedAudioDataSink::Ended() const
+{
+  // Return true when an error is encountered so AudioStream can start draining.
+  return AudioQueue().IsFinished() || mErrored;
 }
 
 void
-DecodedAudioDataSink::AssertNotOnAudioThread()
+DecodedAudioDataSink::Drained()
 {
-  MOZ_ASSERT(NS_GetCurrentThread() != mThread);
+  SINK_LOG("Drained");
+  mEndPromise.Resolve(true, __func__);
 }
 
 } // namespace media
 } // namespace mozilla
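
Editor's note: for orientation, here is a hypothetical, self-contained sketch of how the stream side might drain a source like this one from its data callback. The real driver is AudioStream::DataCallback from Part 1 of this bug and is not shown in this diff, so the FillBuffer() helper is purely illustrative; when the sink's Drained() callback actually fires is decided by the stream's own state handling, which this patch does not touch.

    // Hypothetical consumer of a PopFrames/Ended-style source (standard C++ only).
    #include <cstdint>
    #include <cstring>
    #include <memory>

    using Sample = float;

    struct SourceChunk {
      virtual const Sample* Data() const = 0;
      virtual uint32_t Frames() const = 0;
      virtual ~SourceChunk() = default;
    };

    struct Source {
      virtual std::unique_ptr<SourceChunk> PopFrames(uint32_t aFrames) = 0;
      virtual bool Ended() const = 0;
      virtual ~Source() = default;
    };

    // Fill aFrames frames of interleaved output, padding any shortfall with
    // silence. Returns true if the source has ended and the stream may drain.
    bool FillBuffer(Source& aSource, Sample* aOut, uint32_t aFrames, uint32_t aChannels) {
      uint32_t done = 0;
      while (done < aFrames) {
        std::unique_ptr<SourceChunk> chunk = aSource.PopFrames(aFrames - done);
        if (!chunk || chunk->Frames() == 0) {
          break;  // underrun or end of stream: nothing to copy right now
        }
        std::memcpy(aOut + done * aChannels, chunk->Data(),
                    chunk->Frames() * aChannels * sizeof(Sample));
        done += chunk->Frames();
      }
      if (done < aFrames) {
        std::memset(aOut + done * aChannels, 0,
                    (aFrames - done) * aChannels * sizeof(Sample));
      }
      return aSource.Ended();
    }
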
--- a/dom/media/mediasink/DecodedAudioDataSink.h
+++ b/dom/media/mediasink/DecodedAudioDataSink.h
@@ -2,47 +2,47 @@
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #if !defined(DecodedAudioDataSink_h__)
 #define DecodedAudioDataSink_h__
 
 #include "AudioSink.h"
+#include "AudioStream.h"
 #include "MediaEventSource.h"
 #include "MediaInfo.h"
 #include "mozilla/RefPtr.h"
 #include "nsISupportsImpl.h"
 
 #include "mozilla/dom/AudioChannelBinding.h"
 #include "mozilla/Atomics.h"
 #include "mozilla/Maybe.h"
 #include "mozilla/MozPromise.h"
 #include "mozilla/ReentrantMonitor.h"
 
 namespace mozilla {
 
-class AudioStream;
-
 namespace media {
 
-class DecodedAudioDataSink : public AudioSink {
+class DecodedAudioDataSink : public AudioSink,
+                             private AudioStream::DataSource {
 public:
-
   DecodedAudioDataSink(MediaQueue<MediaData>& aAudioQueue,
                        int64_t aStartTime,
                        const AudioInfo& aInfo,
                        dom::AudioChannel aChannel);
 
   // Return a promise which will be resolved when DecodedAudioDataSink
   // finishes playing, or rejected if any error.
-  RefPtr<GenericPromise> Init() override;
+  RefPtr<GenericPromise> Init(const PlaybackParams& aParams) override;
 
   /*
-   * All public functions below are thread-safe.
+   * None of the public functions below are thread-safe.
+   * They must be called on the MDSM task queue only.
    */
   int64_t GetPosition() override;
   int64_t GetEndTime() const override;
 
   // Check whether we've pushed more frames to the audio hardware than it has
   // played.
   bool HasUnplayedFrames() override;
 
@@ -50,133 +50,60 @@ public:
   void Shutdown() override;
 
   void SetVolume(double aVolume) override;
   void SetPlaybackRate(double aPlaybackRate) override;
   void SetPreservesPitch(bool aPreservesPitch) override;
   void SetPlaying(bool aPlaying) override;
 
 private:
-  enum State {
-    AUDIOSINK_STATE_INIT,
-    AUDIOSINK_STATE_PLAYING,
-    AUDIOSINK_STATE_COMPLETE,
-    AUDIOSINK_STATE_SHUTDOWN,
-    AUDIOSINK_STATE_ERROR
-  };
-
   virtual ~DecodedAudioDataSink();
 
-  void DispatchTask(already_AddRefed<nsIRunnable>&& event);
-  void SetState(State aState);
-  void ScheduleNextLoop();
-  void ScheduleNextLoopCrossThread();
-
-  void OnAudioQueueEvent();
-  void ConnectListener();
-  void DisconnectListener();
-
-  // The main loop for the audio thread. Sent to the thread as
-  // an nsRunnableMethod. This continually does blocking writes to
-  // to audio stream to play audio data.
-  void AudioLoop();
-
-  // Allocate and initialize mAudioStream.  Returns NS_OK on success.
-  nsresult InitializeAudioStream();
-
-  void Drain();
-
-  void Cleanup();
-
-  bool ExpectMoreAudioData();
-
-  // Return true if playback is not ready and the sink is not told to shut down.
-  bool WaitingForAudioToPlay();
-
-  // Check if the sink has been told to shut down, resuming mAudioStream if
-  // not.  Returns true if processing should continue, false if AudioLoop
-  // should shutdown.
-  bool IsPlaybackContinuing();
-
-  // Write audio samples or silence to the audio hardware.
-  // Return false if any error. Called on the audio thread.
-  bool PlayAudio();
-
-  void FinishAudioLoop();
+  // Allocate and initialize mAudioStream. Returns NS_OK on success.
+  nsresult InitializeAudioStream(const PlaybackParams& aParams);
 
-  // Write aFrames of audio frames of silence to the audio hardware. Returns
-  // the number of frames actually written. The write size is capped at
-  // SILENCE_BYTES_CHUNK (32kB), so must be called in a loop to write the
-  // desired number of frames. This ensures that the playback position
-  // advances smoothly, and guarantees that we don't try to allocate an
-  // impossibly large chunk of memory in order to play back silence. Called
-  // on the audio thread.
-  uint32_t PlaySilence(uint32_t aFrames);
-
-  // Pops an audio chunk from the front of the audio queue, and pushes its
-  // audio data to the audio hardware.  Called on the audio thread.
-  uint32_t PlayFromAudioQueue();
-
-  // If we have already written enough frames to the AudioStream, start the
-  // playback.
-  void StartAudioStreamPlaybackIfNeeded();
-  void WriteSilence(uint32_t aFrames);
-
-  ReentrantMonitor& GetReentrantMonitor() const {
-    return mMonitor;
-  }
+  // Interface of AudioStream::DataSource.
+  // Called on the callback thread of cubeb.
+  UniquePtr<AudioStream::Chunk> PopFrames(uint32_t aFrames) override;
+  bool Ended() const override;
+  void Drained() override;
 
-  void AssertCurrentThreadInMonitor() const {
-    GetReentrantMonitor().AssertCurrentThreadIn();
-  }
-
-  void AssertOnAudioThread();
-  void AssertNotOnAudioThread();
-
-  mutable ReentrantMonitor mMonitor;
-
-  // There members are accessed on the audio thread only.
-  State mState;
-  Maybe<State> mPendingState;
-  bool mAudioLoopScheduled;
-
-  // Thread for pushing audio onto the audio hardware.
-  // The "audio push thread".
-  nsCOMPtr<nsIThread> mThread;
-
-  // The audio stream resource. Used on the state machine, and audio threads.
-  // This is created and destroyed on the audio thread, while holding the
-  // decoder monitor, so if this is used off the audio thread, you must
-  // first acquire the decoder monitor and check that it is non-null.
+  // The audio stream resource. Used on the task queue of MDSM only.
   RefPtr<AudioStream> mAudioStream;
 
   // The presentation time of the first audio frame that was played in
   // microseconds. We can add this to the audio stream position to determine
-  // the current audio time. Accessed on audio and state machine thread.
-  // Synchronized by decoder monitor.
+  // the current audio time.
   const int64_t mStartTime;
 
   // PCM frames written to the stream so far.
   Atomic<int64_t> mWritten;
 
   // Keep the last good position returned from the audio stream. Used to ensure
   // position returned by GetPosition() is mono-increasing in spite of audio
-  // stream error.
+  // stream error. Used on the task queue of MDSM only.
   int64_t mLastGoodPosition;
 
   const AudioInfo mInfo;
 
   const dom::AudioChannel mChannel;
 
-  bool mStopAudioThread;
-
+  // Used on the task queue of MDSM only.
   bool mPlaying;
 
   MozPromiseHolder<GenericPromise> mEndPromise;
 
-  MediaEventListener mPushListener;
-  MediaEventListener mFinishListener;
+  /*
+   * Members to implement AudioStream::DataSource.
+   * Used on the callback thread of cubeb.
+   */
+  // The AudioData from which AudioStream::DataSource is currently reading.
+  RefPtr<AudioData> mCurrentData;
+  // The number of frames that have been popped from mCurrentData.
+  uint32_t mFramesPopped = 0;
+  // True if an error (e.g. an int overflow) occurred while processing audio data.
+  bool mErrored = false;
 };
 
 } // namespace media
 } // namespace mozilla
 
 #endif
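
Editor's note: the new mCurrentData/mFramesPopped members act as a read cursor over the front element of the audio queue: each callback request takes up to the remaining frames of the current AudioData, and the buffer is released once fully consumed. Below is a toy standalone model of that cursor with invented buffer and request sizes; it mirrors the bookkeeping in PopFrames() but none of the Gecko types.

    // Toy model of the mCurrentData/mFramesPopped cursor in PopFrames().
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <queue>
    #include <vector>

    struct Buffer { std::vector<float> samples; uint32_t frames; };

    int main() {
      std::queue<Buffer> audioQueue;
      audioQueue.push(Buffer{std::vector<float>(1024, 0.5f), 1024});  // one decoded packet

      Buffer current{};            // plays the role of mCurrentData
      uint32_t framesPopped = 0;   // plays the role of mFramesPopped
      uint64_t written = 0;        // plays the role of mWritten

      for (uint32_t request : {441u, 441u, 441u}) {  // three callback requests
        if (current.frames == 0) {
          if (audioQueue.empty()) break;
          current = audioQueue.front();
          audioQueue.pop();
          framesPopped = 0;
        }
        uint32_t take = std::min(request, current.frames - framesPopped);
        framesPopped += take;
        written += take;
        printf("handed out %u frames (cursor at %u/%u)\n", take, framesPopped, current.frames);
        if (framesPopped == current.frames) {
          current = Buffer{};      // exhausted: pop a fresh packet next time
        }
      }
      printf("total frames written: %llu\n", (unsigned long long)written);
      return 0;
    }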